Dataset columns:
  query            string   (lengths 9 to 3.4k)
  document         string   (lengths 9 to 87.4k)
  metadata         dict
  negatives        sequence (lengths 4 to 101)
  negative_scores  sequence (lengths 4 to 101)
  document_score   string   (lengths 3 to 10)
  document_rank    string   (102 distinct values)
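The rows below list these fields in the same order. As a minimal sketch, assuming each row is loaded as a plain Python dictionary keyed by the column names above (the values here are shortened placeholders taken from the first example, not a real loader):

```python
# One row of the dataset, represented as a plain dict.
# Field names follow the schema above; values are illustrative placeholders.
row = {
    "query": "Test suite including all test suites",
    "document": "def test_suite(): ...",
    "metadata": {"objective": {"self": [], "paired": [],
                               "triplet": [["query", "document", "negatives"]]}},
    "negatives": ["def suite(): ...", "def main(): ..."],   # candidate non-matching documents
    "negative_scores": ["0.798", "0.795"],                   # one score per negative, stored as strings
    "document_score": "0.72484934",                          # score of the paired document
    "document_rank": "39",                                    # rank of the paired document among the candidates
}

print(row["query"], "->", row["document_rank"])
```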
Test suite including all test suites
def test_suite():
    testSuite = unittest.TestSuite()
    testSuite.addTest(test_spec("test_cmd_parser"))
    return testSuite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(globalOptimizerTest))\n test_suite.addTest(unittest.makeSuite(recursiveStepTest))\n return test_suite", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def suite():\n\tts = unittest.TestSuite()\n\tfor test_module in __all__:\n\t\tm = importlib.import_module(\"pyroclast.test.\" + test_module)\n\t\tfor n in dir(m):\n\t\t\tc = getattr(m, n)\n\t\t\tif is_test_case(c):\n\t\t\t\ts = unittest.TestLoader().loadTestsFromTestCase(c)\n\t\t\t\tts.addTests(s)\n\treturn ts", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_polarization.test_suite())\n testSuite.addTest(test_xray.test_suite())\n testSuite.addTest(test_emspectrum.test_suite())\n return testSuite", "def suite():\n return unittest.TestLoader().loadTestsFromName(__name__)", "def suite():\n # Get a list of all files.\n files = glob.glob(os.path.join(os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))), \"test*.py\"))\n files = [os.path.splitext(os.path.basename(_i))[0] for _i in files]\n\n modules = []\n # try to import all files.\n for module in files:\n try:\n module = __import__(module, globals(), locals())\n except:\n warnings.warn(\"Module %s could not be imported\" % module)\n continue\n modules.append(module)\n\n suite = unittest.TestSuite()\n for module in modules:\n for attrib in dir(module):\n value = getattr(module, attrib)\n try:\n if issubclass(value, unittest.TestCase):\n suite.addTest(unittest.makeSuite(value, \"test\"))\n except:\n pass\n return suite", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestIntegration))\n suite.addTest(unittest.makeSuite(TestSection))\n return suite", "def test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)", "def suite():\n return unittest.makeSuite(OpenedTestCase)", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(FitExponentialTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\r\n\r\n current = os.path.dirname(os.path.realpath(__file__))\r\n top = os.path.normpath(os.path.join(current, \"..\", \"..\"))\r\n return unittest.TestLoader().discover(current, pattern='test_*.py', top_level_dir=top)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(GetDailyReportV1TestCase))\n suite.addTest(unittest.makeSuite(GetDailyReportV2TestCase))\n return suite", "def suite():\n tsuite = unittest.TestSuite()\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(commandtests))\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(locktests))\n return tsuite", "def suite():\n suite_obj = unittest.TestSuite()\n suite_obj.addTest(TestEssentials())\n return suite_obj", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(MeasureSourcesTestCase)\n suites += unittest.makeSuite(ForcedMeasureSourcesTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", 
"def suite():\n loader = unittest.TestLoader()\n mysuite = unittest.TestSuite()\n mysuite.addTest(loader.loadTestsFromTestCase(TestUtils))\n \n return mysuite", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTests(unittest.makeSuite(PrimesTests))\n suite.addTests(unittest.makeSuite(OtherTests))\n return suite", "def suite():\n loader = unittest.TestLoader()\n testsuite = loader.loadTestsFromModule(sys.modules[__name__])\n return testsuite", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(SourceHeavyFootprintTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def testsuite():\n loader = unittest.TestLoader()\n ts = unittest.TestSuite()\n ts.addTests(loader.loadTestsFromTestCase(api_server_test.ApiServerTestCase))\n ts.addTests(loader.loadTestsFromTestCase(codec_test.CodecTestCase))\n return ts", "def suite():\n # patch it to work here\n package_def = 'app.test'\n\n suite = unittest.TestSuite()\n\n for other_suite in iter_suites(package_def):\n suite.addTest(other_suite)\n return suite", "def testsuite():\n return unittest.TestLoader().discover(os.path.dirname(__file__))", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(StatisticsTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n \n suite = unittest.TestSuite()\n suite.addTest(FibonacciTestCase)\n suite.addTest(IntervalTestCase)\n suite.addTest(GameTestCase)\n return suite", "def suite():\n\tsuite1 = unittest.makeSuite(TestCrop, 'test')\n\tsuite2 = unittest.makeSuite(TestDiag, 'test')\n\tsuite3 = unittest.makeSuite(TestEye, 'test')\n\tsuite4 = unittest.makeSuite(TestMinDim, 'test') \n\tsuite5 = unittest.makeSuite(TestNnz, 'test')\n\tsuite6 = unittest.makeSuite(TestOnes, 'test')\n\tsuite7 = unittest.makeSuite(TestRand, 'test')\n\tsuite8 = unittest.makeSuite(TestRandSym, 'test')\n\tsuite9 = unittest.makeSuite(TestReplace, 'test')\n\tsuite10 = unittest.makeSuite(TestTriu, 'test')\n\tsuite11 = unittest.makeSuite(TestTril, 'test')\n\treturn unittest.TestSuite((suite1, suite2, suite3, suite4, suite5, suite6, suite7, suite8, suite9, suite10, suite11))", "def testsuite():\n \n tests = unittest.TestSuite()\n\n parse_tests = unittest.makeSuite(ParseTestCase, 'test')\n tests = unittest.TestSuite( (tests, parse_tests) )\n\n return tests", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(AngleTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(DeconvolvedPsfPhotometryTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n\n lsst_tests.init()\n\n suites = []\n suites += unittest.makeSuite(DipoleFitAlgorithmTest)\n suites += unittest.makeSuite(DipoleFitTaskTest)\n suites += unittest.makeSuite(DipoleFitTaskEdgeTest)\n suites += unittest.makeSuite(lsst_tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n utilsTests.init()\n suites = []\n suites += unittest.makeSuite(TestTrackingDb)\n return unittest.TestSuite(suites)", "def test_suite():\n\tsuite = unittest.TestSuite()\n\tsuite.addTest(unittest.makeSuite(TestPloneDbFormsManager))\n\treturn suite", "def _suite(self):\n import mpi.test_application\n import mpi.test_communicator\n import mpi.test_launcher\n\n test_cases = []\n for mod in [\n mpi.test_application,\n 
mpi.test_communicator,\n mpi.test_launcher,\n ]:\n test_cases += mod.test_classes()\n \n suite = unittest.TestSuite()\n for test_case in test_cases:\n suite.addTest(unittest.makeSuite(test_case))\n\n return suite", "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(TestReversi))\n test_suite.addTest(unittest.makeSuite(TestGuessNumberGame))\n test_suite.addTest(unittest.makeSuite(TestConnectFourGame))\n test_suite.addTest(unittest.makeSuite(TestBuscamina))\n test_suite.addTest(unittest.makeSuite(TestGame))\n test_suite.addTest(unittest.makeSuite(TestDamaGame))\n test_suite.addTest(unittest.makeSuite(TestTateti))\n test_suite.addTest(unittest.makeSuite(TestGameBase))\n test_suite.addTest(unittest.makeSuite(TestFourNumber))\n test_suite.addTest(unittest.makeSuite(test_game_generala))\n test_suite.addTest(unittest.makeSuite(test_categories))\n test_suite.addTest(unittest.makeSuite(test_player))\n test_suite.addTest(unittest.makeSuite(test_throw_class))\n test_suite.addTest(unittest.makeSuite(test_throw_dice))\n test_suite.addTest(unittest.makeSuite(TestBets))\n test_suite.addTest(unittest.makeSuite(TestDeck))\n test_suite.addTest(unittest.makeSuite(TestBlackjackGame))\n test_suite.addTest(unittest.makeSuite(TestHands))\n test_suite.addTest(unittest.makeSuite(PokerTest))\n test_suite.addTest(unittest.makeSuite(PokerGameTest))\n test_suite.addTest(unittest.makeSuite(TestBattleship))\n test_suite.addTest(unittest.makeSuite(TestBoard))\n test_suite.addTest(craps_suite())\n test_suite.addTest(sudoku_suite())\n test_suite.addTest(roulette_suite())\n test_suite.addTest(dungeon_suite())\n test_suite.addTest(unittest.makeSuite(TestSenku))\n test_suite.addTest(unittest.makeSuite(TestAhorcado))\n test_suite.addTest(unittest.makeSuite(TestHanoiTower))\n return test_suite", "def suite():\n return unittest.makeSuite(ClientsTestCase)", "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(TestFunctionalSPF, \"test\"))\n return test_suite", "def suite():\n utilsTests.init()\n\n suites = [\n unittest.makeSuite(RingsTestCase),\n unittest.makeSuite(utilsTests.MemoryTestCase),\n ]\n\n return unittest.TestSuite(suites)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(UpdateV1TestCase))\n return suite", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(HscDistortionTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(ListV1TestCase))\n return suite", "def suite_confopt_test():\n loader = unittest.TestLoader()\n suite = unittest.TestSuite(loader.loadTestsFromTestCase(ConfOptTest))\n return suite", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_h5fs(\"test_mode\"))\n testSuite.addTest(test_h5fs(\"test_path_splitting\"))\n testSuite.addTest(test_h5fs(\"test_link_mixing\"))\n return testSuite", "def make_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes():\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n return suite", "def make_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes():\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n return suite", "def suite():\n\n lsst.utils.tests.init()\n\n suites = []\n suites += unittest.makeSuite(SchemaTestCase)\n 
suites += unittest.makeSuite(lsst.utils.tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(CreateV1TestCase))\n suite.addTest(unittest.makeSuite(CreateV2TestCase))\n return suite", "def suite():\n loader = unittest.TestLoader()\n mysuite = unittest.TestSuite()\n mysuite.addTest(loader.loadTestsFromTestCase(TestWorldComposite))\n\n return mysuite", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(GetAccountReportV1TestCase))\n return suite", "def suite(self):\n return TestLoader().loadTestsFromTestCase(SourcehandlerTest)", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_regulargrid(\"test_indexing\"))\n testSuite.addTest(test_regulargrid(\"test_interpolate\"))\n return testSuite", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def main():\n run_test_all()", "def collect_cases(self, suite=False):\n cases = unittest.TestSuite()\n\n if suite:\n test_suites = []\n for file in os.listdir('.'):\n if self.suite_path in file:\n if os.path.isdir(file):\n test_suites.append(file)\n\n for test_suite in test_suites:\n self._collect_cases(cases, top_dir=test_suite)\n else:\n self._collect_cases(cases, top_dir=None)\n return cases", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def test_suite():\n testSuite = unittest.TestSuite()\n\n testSuite.addTest(test_classfactory(\"test_inheritance\"))\n return testSuite", "def suite():\n\n testSuite = common.unittest.TestSuite()\n\n cdatafuncs = [niclassdata] # non-indexing data tests\n cdatafuncs.append(iclassdata) # indexing data tests\n\n heavy = common.heavy\n # Choose which tests to run in classes with autogenerated tests.\n if heavy:\n autoprefix = 'test' # all tests\n else:\n autoprefix = 'test_l' # only light tests\n\n niter = 1\n for i in range(niter):\n # Tests on query data.\n for cdatafunc in cdatafuncs:\n for cdata in cdatafunc():\n class_ = eval(cdata[0])\n if heavy or not class_.heavy:\n suite_ = common.unittest.makeSuite(class_,\n prefix=autoprefix)\n testSuite.addTest(suite_)\n # Tests on query usage.\n testSuite.addTest(common.unittest.makeSuite(ScalarTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(MDTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage1))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage2))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage3))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage4))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage5))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage6))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage7))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage8))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage9))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage10))\n 
testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage11))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage12))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage13))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage14))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage15))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage16))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage17))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage18))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage19))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage20))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage21))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage22))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage23))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage24))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage25))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage26))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage27))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage28))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage29))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage30))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage31))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage32))\n\n return testSuite", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTests(\n [\n layered(\n doctest.DocFileSuite(\"behaviors.rst\"),\n layer=testing.TEXT_INTEXER_INTEGRATION_TESTING,\n ),\n ]\n )\n return suite", "def master_test_suite( pkg_mod_iter ):\n master_suite= unittest.TestSuite()\n for package, module_iter in pkg_mod_iter:\n for filename, module in module_iter:\n print( package+\".\"+module )\n suite= doctest.DocTestSuite( package+\".\"+module )\n print( \" \", suite )\n master_suite.addTests( suite )\n runner= unittest.TextTestRunner( verbosity=1 )\n runner.run( master_suite )", "def starlib_test_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n \n suite.addTests(loader.loadTestsFromModule(test_star))\n suite.addTests(loader.loadTestsFromModule(test_camera))\n suite.addTests(loader.loadTestsFromModule(test_image))\n suite.addTests(loader.loadTestsFromModule(test_star_database))\n suite.addTests(loader.loadTestsFromModule(test_kdtree))\n\n return suite", "def make_suite():\n suite = unittest.TestSuite()\n return suite", "def getTestSuite():\n test_suite = unittest.TestSuite([])\n\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistReaders))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPySnpTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFileCache))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestUtilTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestIntRangeSet))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKrDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpGen))\n 
test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGenerate))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstMemMap))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpMemMap))\n test_suite.addTests(NaNCNCTestCases.factory_iterator())\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstReader))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKernelReader))\n\n return test_suite", "def collectTests(self, global_ctx):\n pass", "def main_test():\n full = unittest.TestSuite()\n full.addTest(unittest.makeSuite(TestToolOptions))\n full.addTest(unittest.makeSuite(TestBadConfiguration))\n full.addTest(unittest.makeSuite(TestBasicEndpoints))\n full.addTest(unittest.makeSuite(TestMultipleEPG))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpoints))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater))\n full.addTest(unittest.makeSuite(TestExportPolicyRemoval))\n full.addTest(unittest.makeSuite(TestBasicEndpointsWithContract))\n full.addTest(unittest.makeSuite(TestBasicEndpointMove))\n full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract))\n full.addTest(unittest.makeSuite(TestChangeL3Out))\n full.addTest(unittest.makeSuite(TestDuplicates))\n full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs))\n full.addTest(unittest.makeSuite(TestDeletions))\n\n unittest.main()", "def suite_utilstest():\n loader = unittest.TestLoader()\n suite = unittest.TestSuite(loader.loadTestsFromTestCase(UtilsTest))\n return suite", "def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def test_suite():\n tests = [doctest.DocFileSuite(file,\n optionflags=doctest.ELLIPSIS) for file in docfiles]\n return unittest.TestSuite(tests)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(ServicesMenuDropdownListTestCase(\"testServicesMenuDropdownListItems\"))\n return suite", "def test_suite():\n test_loader = unittest.TestLoader()\n # Read in unit tests\n test_suite = test_loader.discover('tests')\n\n # Read in doctests from README\n test_suite.addTests(doctest.DocFileSuite('README.md',\n optionflags=doctest.ELLIPSIS))\n return test_suite", "def runTestSuites(self):\n \n self.testsuitesToXML()\n \n\n tss = []\n jobStatus = {}\n for t in self.testsuites:\n d = t.testsuitedir\n runner = os.path.join(self.basepath, 'testSuiteRunner.py')\n tdir = os.path.join(d, 'testsuite.out')\n cmd = 'python %s %s>& %s' % (runner, d,tdir)\n #print 'about to popen the cmd: %s' % cmd\n tss.append((t.name, popen2.Popen3(cmd)))\n jobStatus[t.name] = ('running', nowSecs())\n ntests = len(tss)\n printJobStatus(jobStatus)\n\n while tss:\n toRemove = [p for p in tss if p[1].poll() != -1]\n if toRemove:\n [tss.remove(p) for p in toRemove]\n for p in toRemove:\n jobStatus[p[0]] = ('completed', nowSecs())\n\n printJobStatus(jobStatus)\n time.sleep(10)\n\n print 'all %d tests have completed' % ntests", "def unittest():\n from a6test import test_all\n test_all()", "def local():\n suite = ServiceTestSuite()\n suite.addTest(unittest.makeSuite(Test, 'test_local'))\n return suite", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = 
unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def runalltests():\n doctest.testmod()", "def makeTestSuiteV201109():\n suite = unittest.TestSuite()\n suite.addTests(unittest.makeSuite(TrafficEstimatorServiceTestV201109))\n return suite", "def find_all_tests(suite):\n suites = [suite]\n while suites:\n s = suites.pop()\n try:\n suites.extend(s)\n except TypeError:\n yield s, '%s.%s.%s' % (\n s.__class__.__module__,\n s.__class__.__name__,\n s._testMethodName\n )", "def suite():\n \n return unittest.TestSuite([\n LocationClassTestCase,\n LocationInstanceTestCase,\n ])", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def desistar_test_suite():\n from os.path import dirname\n desistar_dir = dirname(dirname(__file__))\n # print(desistar_dir)\n return unittest.defaultTestLoader.discover(desistar_dir,\n top_level_dir=dirname(desistar_dir))", "def setUpSuite():\n global _output_dir\n global _suite_configured\n\n if _suite_configured:\n return\n\n def remove_output_dir():\n global _output_dir\n if _output_dir != '':\n try:\n shutil.rmtree(_output_dir)\n except FileNotFoundError:\n pass\n\n atexit.register(remove_output_dir)\n _output_dir = tempfile.mkdtemp(dir=TESTS_DIR)\n\n os.environ['VOC_BUILD_DIR'] = os.path.join(_output_dir, 'build')\n os.environ['VOC_DIST_DIR'] = os.path.join(_output_dir, 'dist')\n\n # If the code has been precompiled, we don't have to\n # compile it as part of the test suite setup.\n precompile = os.environ.get('PRECOMPILE', 'true').lower() == 'true'\n if not precompile:\n _suite_configured = True\n return\n\n proc = subprocess.Popen(\n \"ant java\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n\n try:\n out, err = proc.communicate(timeout=30)\n except subprocess.TimeoutExpired:\n proc.kill()\n out, err = proc.communicate()\n raise\n\n if proc.returncode != 0:\n raise Exception(\"Error compiling java sources: \" + out.decode('ascii'))\n\n _suite_configured = True", "def main():\n try:\n unittest.main(testLoader=BetterLoader(), defaultTest='suite')\n except Exception:\n import sys\n import traceback\n traceback.print_exc()\n sys.exit(1)", "def test_iter(cls, suite):\n for test in suite:\n if isinstance(test, unittest.TestSuite):\n for t in cls.test_iter(test):\n yield t\n else:\n yield test", "def test_iter(cls, suite):\n for test in suite:\n if isinstance(test, unittest.TestSuite):\n for t in cls.test_iter(test):\n yield t\n else:\n yield test", "def run_suite(*test_classes):\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes:\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n if suite is not None:\n unittest.TextTestRunner(verbosity=2).run(suite)\n return", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in [t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def filter_suite(self, suite):\n if isinstance(suite, 
unittest.TestSuite):\n suite_copy = self.suiteClass()\n for sub in suite:\n if isinstance(sub, unittest.TestSuite):\n suite_copy.addTest(self.filter_suite(sub))\n else:\n if self.is_healthcheck(sub):\n suite_copy.addTest(sub)\n elif self.is_healthcheck(suite):\n suite_copy = suite.copy()\n return suite_copy", "def do_TestSuite(suite):\n cl = suite.__class__\n name = mangle_test_name(suite.test_name)\n dbsuite = get_or_create_TestSuite(name=name, valid=True, \n suiteimplementation=\"%s.%s\" % (cl.__module__, cl.__name__))\n dbsuite.subsuites = []\n dbsuite.testcases = []\n\n memo = set()\n for testentry in suite:\n if testentry.inst.__class__ in memo:\n continue\n memo.add(testentry.inst.__class__)\n if isinstance(testentry, core.SuiteEntry):\n newsuite = do_TestSuite(testentry.inst)\n dbsuite.subsuites.append(newsuite)\n else: # a TestEntry or TestSeriesEntry\n dbcase = do_TestEntry(testentry)\n dbsuite.testcases.append(dbcase)\n _dbsession.commit()\n return dbsuite", "def createTestSuite():\n import tests.functional.tests as functional\n return unittest.TestLoader().loadTestsFromModule(functional)", "def suite_config_test():\n loader = unittest.TestLoader()\n suite = unittest.TestSuite(loader.loadTestsFromTestCase(ConfigTest))\n return suite", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_generate_all_testing(self):\n pass", "def runTests(self):\n \n pass", "def gen_suite(tests):\n cases = [gen_case(test) for test in tests]\n return {\n 'cases': cases,\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'\n }", "def suite(module_names):\n suite = unittest.TestSuite()\n for mod in load_modules(module_names):\n suite.addTest(module_suite(mod))\n return suite", "def tests():\n api.local('nosetests')", "def getTestSuite():\n\n suite1 = unittest.TestLoader().loadTestsFromTestCase(TestDataProcs)\n return unittest.TestSuite([suite1,suite2])", "def __test_suites(self, report_url: str) -> Sequence[Element]:\n root = self.__element_tree(report_url)\n return root.findall('suite')", "def tests():", "def __main() :\n launchTests()", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def test_suite():\n return doctest.DocFileSuite(\n '../README.rst',\n 'server.rst',\n 'client.rst',\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)", "def get_suite():\n import doctest\n import sys\n return doctest.DocTestSuite( sys.modules[__name__] )" ]
[ "0.79815567", "0.79566133", "0.79299814", "0.7830009", "0.78079045", "0.77646685", "0.7761804", "0.7670637", "0.7668768", "0.7660842", "0.7646167", "0.7619189", "0.7598848", "0.7583602", "0.75772774", "0.7562298", "0.75592405", "0.752341", "0.75119865", "0.75030947", "0.747579", "0.7461649", "0.7460984", "0.74258256", "0.7413234", "0.7407263", "0.7399274", "0.7396716", "0.7382312", "0.73726803", "0.7354318", "0.73374563", "0.73316747", "0.7303075", "0.7301389", "0.72940147", "0.726943", "0.7268616", "0.7259126", "0.7233083", "0.7209904", "0.72046953", "0.72046953", "0.7189896", "0.7158548", "0.71285665", "0.7098031", "0.7090216", "0.70577824", "0.7053617", "0.7001574", "0.6989955", "0.6986327", "0.69842386", "0.6968736", "0.6955304", "0.69182104", "0.69163156", "0.6901534", "0.68880373", "0.684972", "0.6842391", "0.68242776", "0.67988837", "0.6781839", "0.6763818", "0.6732569", "0.6691777", "0.66739464", "0.6656625", "0.6640775", "0.66388303", "0.6635329", "0.663517", "0.6617143", "0.6612706", "0.6600624", "0.65847576", "0.65740496", "0.6539671", "0.6539671", "0.6527195", "0.6526578", "0.65255433", "0.6521377", "0.6506619", "0.6462713", "0.6461544", "0.64564604", "0.6444731", "0.6416155", "0.6409785", "0.64005154", "0.6394458", "0.6385531", "0.63789666", "0.6374264", "0.637357", "0.6367998", "0.6356014" ]
0.72484934
39
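The objective block in the metadata names a single (query, document, negatives) triplet, which suggests each row is meant to be expanded into one training triplet per negative. A minimal sketch of that expansion, assuming the row dictionary from the sketch above and that every negative is paired with the same query and document (the helper name to_triplets is only for illustration):

```python
def to_triplets(row):
    """Expand one row into (anchor, positive, negative) tuples.

    Assumes the triplet objective in the metadata maps each entry of
    `negatives` to one (query, document, negative) training example.
    """
    return [(row["query"], row["document"], negative) for negative in row["negatives"]]

# Usage: every negative yields one triplet sharing the same anchor and positive.
for anchor, positive, negative in to_triplets(row):
    pass  # feed into a triplet-loss trainer of your choice
```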
_operator()_ Used as callback to handle events that have been subscribed to
def __call__(self, event, payload):
    logging.debug("Received Event: %s" % event)
    logging.debug("Payload: %s" % payload)

    # start debug event
    if event == "MergeAccountant:StartDebug":
        logging.getLogger().setLevel(logging.DEBUG)
        return

    # stop debug event
    if event == "MergeAccountant:EndDebug":
        logging.getLogger().setLevel(logging.INFO)
        return

    # enable event
    if event == "MergeAccountant:Enable":
        self.enabled = True
        return

    # disable event
    if event == "MergeAccountant:Disable":
        self.enabled = False
        return

    # a job has finished
    if event == "JobSuccess":
        try:
            self.jobSuccess(payload)
        except Exception, ex:
            msg = "Unexpected error when handling a "
            msg += "JobSuccess event: " + str(ex)
            msg += traceback.format_exc()
            logging.error(msg)
        return

    # a job has failed
    if event == "GeneralJobFailure":
        try:
            self.jobFailed(payload)
        except Exception, msg:
            logging.error("Unexpected error when handling a " + \
                          "GeneralFailure event: " + str(msg))
        return

    # wrong event
    logging.debug("Unexpected event %s, ignored" % event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(observer):", "def subscribe(observer):", "def observe(self, event_pattern, priority, callback):\n pass # pragma: no cover", "def __call__(self, trigger, type, event):", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def events(self):", "def doEvent(self, source):\n pass", "def on_event(self, event):", "def on_event(self, event):\r\n pass", "def onEvent(self, eventName, callBack):\n self.msg_namespace.on('ops', callBack)", "def callback(self):\n pass # pragma: no cover", "def on_event(self, event):\n pass", "def visit_event(self, event):", "def event_in_cb(self, msg):\n self.event = msg.data", "def event_queue_proc(self,event):\r\n event()", "def callback(self, *args):\n raise NotImplementedError()", "def dispatch(self, sender, event, *args, **kwargs):\n pass # pragma: no cover", "def on(self):\n raise NotImplementedError", "def on_clicked(self, func):\n return self._observers.connect('clicked', func)", "def on(self) -> None:", "def react_to_event(self):\n raise NotImplementedError()", "def event_receive(self,event):\n\n pass", "def event(self, fn):\n self.bind({fn.__name__: fn})", "def on(self):", "def subscribe(receiver, catchup):", "def handle_event(self, event):", "def on(self) -> None:\n ...", "def on(self): # pylint: disable=invalid-name\n self._make_event(1)", "def _(event):\n pass", "def _(event):\n pass", "def _(event):\n pass", "def msg_event(self, event):\r\n pass", "def on_clicked(self, func):\n return self._observers.connect('clicked', lambda event: func(event))", "def decorator(func):\n self.subscribe(func, event, *events)\n return func", "def __call__(self, args, kwargs):\n callback = self._callback_ref()\n if callback is not None:\n callback(*args, **kwargs)", "def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover", "def _notify(self, observable):\n pass", "def post(self, event, *args, **kwargs):\n self.inq.Signal((event, args, kwargs))", "def process_event(self, event):\r\n pass", "def notify(self, observable, *args, **kwargs) -> None:", "def dispatcher(self):\n pass # pragma: no cover", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def trigger(self, type, event):", "def notifyObservers(self):", "def __init__(self):\n _subscribe_marked_events(self)", "def listen(obj, name, func):\n _signals(obj, name).append(func)", "def __call__(self, event):\n if not self.events or event in self.events:\n super(EventHandler, self).__call__(event)", "def process(self, event):\n pass", "def __call__(self, state, action):\n pass", "def OnAccept(self, event):\n pass", "def on_clicked(self, func):\n cid = self.cnt\n self.observers[cid] = func\n self.cnt += 1\n return cid", "def on_clicked(self, func):\n cid = self.cnt\n self.observers[cid] = func\n self.cnt += 1\n return cid", "def accept(self, event):\n raise NotImplementedError()", "def run(self, event):\n pass", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def enqueue(self,e):", "def perform_callback(self, *args, **kwargs):\n pass", "def observe(self, fn):\n self.observers.append(fn)\n return fn", "def subscribeToEvent(eventName,subscriber,msgInterface):", "def observerRead(self, x):\n pass", "def _(event):\n # TODO", "def callback(name):\n f = \"on%s\" % \"\".join(x.capitalize() for x in name.split())\n return lambda: getattr(self, f)()", "def closure(self, t):\n raise NotImplementedError", "def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)", "def 
subscribe():\n def func_wrapper(func):\n queue.observe_on(scheduler).subscribe(func)\n return func\n return func_wrapper", "def dummy_callback(obj):\n pass", "def _onconnect(self):\n\n pass", "def _emited(self, *args):\n\t\tdebug(\"OnEventDeferred : event catched\")\n\t\tself.callback(*args)\n\t\tself._clean()", "def run(self, in_op):\n raise NotImplementedError", "def operator(self, operator):\n\n self._operator = operator", "def subscribe(self, func, insert=False):\n if insert:\n self.observers.appendleft(func)\n else:\n self.observers.append(func)", "def __iter__(self):\n return iter(self.events)", "def listen(eventType):\n def _decoration(fcn):\n fcn.listen = True\n fcn.eventType = eventType\n return fcn\n return _decoration", "def func ( self ) :\n return self.__func", "def fire(self):", "def dispatch(self, event: str, message: str) -> None:\n\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\tcallback(event, message)", "def right_callback(self):\n self.rokucontrol.right_callback()", "def on_clicked(self, func):\n return self._observers.connect('clicked', lambda text: func(text))", "def subscribe(ami, worker):\n\n if hasattr(worker, \"event\"):\n for event in worker.event:\n if isinstance(event, (str, unicode)):\n functionName = \"handle_\" + event\n if hasattr(worker, functionName):\n functionToCall = getattr(worker, functionName)\n ami.events.subscribe(event, functionToCall)\n return", "def on_changed(self, func):\n return self._observers.connect('changed', lambda val: func(val))", "def on_changed(self, func):\n return self._observers.connect('changed', lambda val: func(val))", "def on_submit(self, func):\n return self._observers.connect('submit', lambda text: func(text))", "def poll_function(self):\n return self.poll_instruction", "def signal_oi(self):\n pass", "def next(self, in_op):\n raise NotImplementedError", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def act(self):\n pass", "def func ( self ) :\n return self.__func", "def handle_event(self, event):\n pass", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def signal(self):\n pass", "def _poll_event_queue(self):\n if not self.event_queue.empty():\n obj = self.event_queue.get(block=False)\n if isinstance(obj, tuple):\n if len(obj) == 1:\n obj[0]()\n elif len(obj) == 2:\n if isinstance(obj[1], list):\n obj[0](*obj[1])\n elif isinstance(obj[1], dict):\n obj[0](**obj[1])\n elif len(obj) == 3:\n obj[0](*obj[1], **obj[2])\n self.view.after(100, self._poll_event_queue)", "def subscribe_on(self, callback: callable):\n topic = f\"{self._subscriber_topic}_on\"\n subscribe_topic(callback, topic)" ]
[ "0.65357095", "0.65357095", "0.65028983", "0.6397221", "0.62587845", "0.62279207", "0.6155505", "0.61392504", "0.61106503", "0.6109636", "0.60489565", "0.5954443", "0.5939382", "0.5924449", "0.58737683", "0.58658725", "0.5865626", "0.58425915", "0.5839737", "0.58376145", "0.58344793", "0.5807417", "0.5800934", "0.5798854", "0.57865494", "0.57845455", "0.5783788", "0.5775894", "0.57644814", "0.57644814", "0.57644814", "0.57597077", "0.57335055", "0.5710346", "0.567359", "0.5661764", "0.5655398", "0.56489575", "0.56398034", "0.5637617", "0.56343", "0.5630013", "0.5630013", "0.5630013", "0.5623131", "0.5606023", "0.56024015", "0.5602058", "0.55977213", "0.5595422", "0.5575919", "0.5575009", "0.55694145", "0.55694145", "0.55609345", "0.55431974", "0.5532454", "0.5532454", "0.5532454", "0.5532454", "0.5519798", "0.55193347", "0.5517166", "0.5512528", "0.54968184", "0.5483814", "0.5479886", "0.5475422", "0.5472853", "0.54706573", "0.5461555", "0.5450101", "0.5447252", "0.5443857", "0.5432873", "0.54304457", "0.5430231", "0.542732", "0.5426551", "0.5424545", "0.5416991", "0.541301", "0.5412039", "0.5394616", "0.53765845", "0.53765845", "0.5369049", "0.5366397", "0.5366363", "0.535852", "0.5355678", "0.5351067", "0.5339861", "0.5337857", "0.53144896", "0.5310004", "0.5310004", "0.5310004", "0.53081644", "0.53051805", "0.5301945" ]
0.0
-1
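negative_scores appears to hold one similarity score per entry in negatives, while document_score and document_rank describe how the paired document compares against those candidates. A minimal sketch, under the assumption that a higher score marks a harder (more similar) negative, of keeping only the k hardest negatives from a row (the helper name hardest_negatives is only for illustration):

```python
def hardest_negatives(row, k=5):
    """Return the k negatives with the highest scores.

    Assumes `negatives` and `negative_scores` are aligned lists and that
    a higher score means the negative is more similar to the query.
    """
    scored = zip((float(s) for s in row["negative_scores"]), row["negatives"])
    ranked = sorted(scored, key=lambda pair: pair[0], reverse=True)
    return [doc for _, doc in ranked[:k]]
```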
_jobSuccess_ A job has finished successfully. Non merge jobs are ignored. If it is complete success, all input files are marked as 'merged' and the output file as 'merged'. JobSuccess for partial merge are not implemented yet!. When it
def jobSuccess(self, jobReport):
    jobName = None
    try:
        #// Invoke job report handler with jobReport location and flag to enable/disable merge job report handling
        handler = ReportHandler(jobReport, int(self.args['MaxInputAccessFailures']), enableMergeHandling=self.enabled)
        jobName = handler()
        logging.info('this is jobname' + str(jobName))
    except Exception, ex:
        msg = "Failed to handle job report from job:\n"
        msg += "%s\n" % jobReport
        msg += str(ex)
        msg += "\n"
        msg += traceback.format_exc()
        logging.error(msg)

    #// Failed to read job report
    if jobName is None:
        return

    # files can be cleaned up now
    logging.info("trigger cleanup for: %s" % jobName)

    try:
        self.trigger.setFlag("cleanup", jobName, "MergeAccountant")
    except (ProdAgentException, ProdException):
        logging.error("trying to continue processing success event")

    return

#// END jobSuccess
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_job_success(self, job):\n super().handle_job_success(job)\n\n self._handle_job_status(job, \"finished\")", "def jobFailed(self, jobName):\n\n # ignore non merge jobs\n if jobName.find('mergejob') == -1:\n logging.info(\"Ignoring job %s, since it is not a merge job\" \\\n % jobName)\n # Add cleanup flag for non merge jobs too\n logging.info(\"trigger cleanup for: %s\" % jobName)\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing failure event\")\n return\n\n # files can be cleaned up now\n logging.info(\"trigger cleanup for: %s\" % jobName)\n\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing failure event\")\n\n # verify enable condition\n if not self.enabled:\n return\n\n # open a DB connection\n database = MergeSensorDB()\n\n # start a transaction\n database.startTransaction()\n\n # get job information\n try:\n jobInfo = database.getJobInfo(jobName)\n\n # cannot get it!\n except Exception, msg:\n logging.error(\"Cannot process Failure event for job %s: %s\" \\\n % (jobName, msg))\n database.closeDatabaseConnection()\n return\n\n # check that job exists\n if jobInfo is None:\n logging.error(\"Job %s does not exist.\" % jobName)\n database.closeDatabaseConnection()\n return\n\n # check status\n if jobInfo['status'] != 'undermerge':\n logging.error(\"Cannot process Failure event for job %s: %s\" \\\n % (jobName, \"the job is not currently running\"))\n database.closeDatabaseConnection()\n\n return\n\n # get dataset id\n datasetId = database.getDatasetId(jobInfo['datasetName'])\n\n # mark all input files as 'unmerged' (or 'invalid')\n unFinishedFiles = []\n for fileName in jobInfo['inputFiles']:\n\n # update status\n newStatus = database.updateInputFile(\\\n datasetId, fileName, \\\n status = \"unmerged\", \\\n maxAttempts = int(self.args['MaxInputAccessFailures']))\n\n # add invalid files to list of non finished files\n if newStatus == 'invalid':\n unFinishedFiles.append(fileName)\n\n # mark output file as 'failed'\n database.updateOutputFile(datasetId, jobName=jobName, status='failed')\n\n # commit changes\n database.commit()\n\n # notify the PM about the unrecoverable files\n if len(unFinishedFiles) > 0:\n File.merged(unFinishedFiles, True)\n\n # log message\n logging.info(\"Job %s failed, file information updated.\" % jobName)\n\n # close connection\n database.closeDatabaseConnection()", "def jobComplete(self):\n self._Finished = True\n return", "def on_merge(self, to_be_merged, merge_result, context):\n pass", "def test_job_complete(self):\r\n t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')\r\n self.assertFalse(job_complete(t))\r\n self.assertFalse(job_complete(t.Children[0]))\r\n self.assertFalse(job_complete(t.Children[1].Children[1]))\r\n\r\n self.assertRaises(JobError, job_complete, t.Children[0].Children[0])\r\n\r\n f = 'test_parallel_merge_otus_JOB_COMPLETE_TEST.poll'\r\n self.assertFalse(os.path.exists(f))\r\n\r\n testf = open(f, 'w')\r\n testf.write('0\\n')\r\n testf.close()\r\n t.PollPath = f\r\n t.StartTime = 10\r\n\r\n self.assertTrue(job_complete(t))\r\n self.assertNotEqual(t.EndTime, None)\r\n self.assertNotEqual(t.TotalTime, None)\r\n\r\n testf = open(f, 'w')\r\n testf.write('1\\n')\r\n testf.close()\r\n\r\n self.assertRaises(JobError, job_complete, t)\r\n t.Processed = False\r\n self.assertRaises(JobError, job_complete, 
t)\r\n\r\n os.remove(f)", "def add_merge_job(dax, final_name, chunk, level, job_number, final):\n j = Job(name=\"merge.sh\")\n out_file_name = final_name + \"-%d-%d.tar.gz\" %(level, job_number)\n out_file = File(out_file_name)\n if final:\n out_file_name = final_name\n out_file = File(final_name)\n j.uses(out_file, link=Link.OUTPUT, transfer=final)\n j.addArguments(out_file)\n for f in chunk:\n flfn = File(f)\n j.uses(flfn, link=Link.INPUT)\n j.addArguments(flfn)\n j.addProfile(Profile(Namespace.CONDOR, 'request_disk', '100 GB'))\n dax.addJob(j)\n return out_file_name", "def handle_completed_job(job, job_set, event_list):\n if not job.postvalidate():\n event_list = push_event(\n event_list,\n '{} completed but doesnt have expected output'.format(job.get_type()))\n job.status = JobStatus.FAILED\n\n if job.get_type() == 'coupled_diagnostic':\n img_dir = 'coupled_diagnostics_{casename}-obs'.format(\n casename=job.config.get('test_casename'))\n img_src = os.path.join(\n job.config.get('coupled_project_dir'),\n img_dir)\n setup_local_hosting(job, event_list, img_src)\n elif job.get_type() == 'amwg_diagnostic':\n img_dir = 'year_set_{year}{casename}-obs'.format(\n year=job.config.get('year_set'),\n casename=job.config.get('test_casename'))\n img_src = os.path.join(\n job.config.get('test_path_diag'),\n '..',\n img_dir)\n setup_local_hosting(job, event_list, img_src)\n elif job.get_type() == 'uvcmetrics':\n img_src = os.path.join(job.config.get('--outputdir'), 'amwg')\n setup_local_hosting(job, event_list, img_src)\n job_set_done = True\n for job in job_set.jobs:\n if job.status != JobStatus.COMPLETED:\n job_set_done = False\n break\n if job.status == JobStatus.FAILED:\n job_set.status = SetStatus.FAILED\n return\n if job_set_done:\n job_set.status = SetStatus.COMPLETED", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n self.assertNotIn('DB_PASS', result['configuration']['settings'])", "def job_completed(self,event):\n if event.exception:\n logger.worker.warning('The job crashed :(')\n else:\n logger.worker.warning(self.task_id+'The job finished ')\n # set job complete to true, will display complete in web interface \n self.job_complete_status[self.task_id] = True", "def job_complete(node, verbose=False):\r\n if node.PollPath is None or node.istip():\r\n raise JobError(\"Attempting to merge tip: %s\" % node.Name)\r\n\r\n if node.Processed:\r\n raise JobError(\"Already processed node: %s\" % node.Name)\r\n\r\n if os.path.exists(node.PollPath):\r\n node.EndTime = time()\r\n node.TotalTime = node.EndTime - node.StartTime\r\n\r\n 
node.ExitStatus = open(node.PollPath).read().strip()\r\n if node.ExitStatus != '0':\r\n raise JobError(\"Node %s did not complete correctly!\" % node.Name)\r\n\r\n if verbose:\r\n print \"finishing %s, %f seconds\" % (node.Name, node.TotalTime)\r\n\r\n node.Processed = True\r\n return True\r\n\r\n else:\r\n return False", "def complete_job(self, command_dict):\n job_uuid = command_dict['job_uuid']\n try:\n job = Job[job_uuid]\n except KeyError as e:\n # Job not found is not worth re-raising\n logger.warn(e)\n logger.warn(\"Job {} missing\".format(job_uuid))\n return\n\n logger.info(\"job {} finished with status of {}\".format(job.uuid,\n job.status))\n # Get the job log from the worker\n logger.info(\"retrieving log for job {}\".format(job.uuid))\n job_data_dir = os.path.join(self.data_dir, job.uuid)\n if(not os.path.exists(job_data_dir)):\n os.mkdir(job_data_dir)\n\n fetch_file_from_url(job.log_url(), job_data_dir)\n\n # Now get the job output data from the worker\n if(job.status == Job.STATUS_PROCESSED):\n\n logger.info(\"retrieving output for job {}\".format(job.uuid))\n fetch_file_from_url(job.download_url(), job_data_dir)\n job.status = Job.STATUS_COMPLETE\n\n job.on_primary = True\n # save job\n Job[job.uuid] = job", "def codepipeline_success(job_id):\n try:\n codepipeline = boto3.client('codepipeline')\n codepipeline.put_job_success_result(jobId=job_id)\n LOGGER.info('===SUCCESS===')\n return True\n except ClientError as err:\n LOGGER.error(\"Failed to PutJobSuccessResult for CodePipeline!\\n%s\", err)\n return False", "def cuffmerge(job, config, name, samples, manifest):\n\n stats_root = \"{}_cuffmerge_stats\".format(config['run_id'])\n logfile = \"{}.cuffmerge.log\".format(config['run_id'])\n\n command = [\"{}\".format(config['cuffmerge']['bin']),\n \"-g {}\".format(config['transcript_reference']),\n \"-s {}\".format(config['reference']),\n \"-p {}\".format(config['cuffmerge']['num_cores']),\n \"{}\".format(manifest)]\n\n job.fileStore.logToMaster(\"Cuffmerge Command: {}\\n\".format(command))\n pipeline.run_and_log_command(\" \".join(command), logfile)\n\n pwd = os.getcwd()\n config['merged_transcript_reference'] = os.path.join(pwd, \"merged.gtf\")\n\n return stats_root", "def check_upload_complete(self, job_id):\n post_json = {\"upload_id\": job_id}\n return self.app.post_json(\"/v1/finalize_job/\", post_json, headers={\"x-session-id\": self.session_id})", "def _check_for_finished_job(self):\n raise NotImplementedError", "def put_job_success(job, message):\n print('Putting job success')\n print(message)\n code_pipeline.put_job_success_result(jobId=job)", "def test_successful_job(self, _is_coalesced):\n successful_job = json.loads(BASE_JSON % (SUCCESS, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def job_done(self, success):\n run_usage = self._attempt.get_usage()\n self._usage.append(run_usage)\n\n log.debug(\"job_done job_id=%s success=%s (last attempt %s\", self.job_id, success, self._attempt_ids[-1])\n self._attempt = None", "def test_job_replacement():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'job_status': 'submitted'})\n\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'addition': 'test',\n 'job_status': 'finished'},\n replace=True)\n\n status = Status(STATUS_DIR).data['generation']['test1']['job_status']\n addition = Status(STATUS_DIR).data['generation']['test1']['addition']\n assert status == 'finished'\n assert 
addition == 'test'", "def wait(self):\n if not self.submitted:\n if _conf.get_option('jobs', 'auto_submit'):\n _logme.log('Auto-submitting as not submitted yet', 'debug')\n self.submit()\n _sleep(0.5)\n else:\n _logme.log('Cannot wait for result as job has not been ' +\n 'submitted', 'warn')\n return False\n _sleep(0.1)\n self.update()\n if self.done:\n return True\n _logme.log('Waiting for self {}'.format(self.name), 'debug')\n if self.queue.wait(self) is not True:\n return False\n # Block for up to file_block_time for output files to be copied back\n btme = _conf.get_option('jobs', 'file_block_time')\n # btme = 2\n start = _dt.now()\n lgd = False\n while True:\n if not lgd:\n _logme.log('Checking for output files', 'debug')\n lgd = True\n count = 0\n for i in self.outfiles:\n if _os.path.isfile(i):\n count += 1\n if count == len(self.outfiles):\n _logme.log('All output files found in {} seconds'\n .format(count), 'debug')\n break\n _sleep(0.1)\n if (_dt.now() - start).seconds > btme:\n _logme.log('Job completed but files have not appeared for ' +\n '>{} seconds'.format(btme))\n return False\n self.update()\n return True", "def _finish_context(self, success):\n try:\n # Mark the job as complete.\n self.__complete = True\n\n # Update the job status within the procedure. Currently, if this\n # fails, we are not sure whether the changes have succeeded or\n # not. This is something that needs to be improved in the near\n # future.\n self.__procedure.add_executed_job(self)\n\n except _errors.DatabaseError as error:\n _LOGGER.error(\n \"Error in %s finishing job's context.\",\n self.__action.__name__, exc_info=error\n )", "def search_complete(job):\n job.refresh()\n return job[\"dispatchState\"] in (\"FAILED\", \"DONE\")", "def fileIsComplete(self):\n return True", "def _is_job_finished(self, job_id):\n complete, rc, status, result, task = False, 0, None, None, None\n job = self.get_job_by_id(job_id)\n if job:\n status = job['status']\n try:\n result, task = job['result'], job['task']\n except KeyError:\n pass\n if status.lower() == SUCCEEDED:\n complete = True\n elif status.lower() in INCOMPLETE_LIST:\n complete = False\n else:\n rc, complete = -1, True\n return complete, result, rc, status, task", "def _handle_success(self, result_ttl: int, pipeline: 'Pipeline'):\n # self.log.debug('Setting job %s status to finished', job.id)\n self.set_status(JobStatus.FINISHED, pipeline=pipeline)\n # Result should be saved in job hash only if server\n # doesn't support Redis streams\n include_result = not self.supports_redis_streams\n # Don't clobber user's meta dictionary!\n self.save(pipeline=pipeline, include_meta=False, include_result=include_result)\n # Result creation should eventually be moved to job.save() after support\n # for Redis < 5.0 is dropped. job.save(include_result=...) 
is used to test\n # for backward compatibility\n if self.supports_redis_streams:\n from .results import Result\n\n Result.create(self, Result.Type.SUCCESSFUL, return_value=self._result, ttl=result_ttl, pipeline=pipeline)\n\n if result_ttl != 0:\n finished_job_registry = self.finished_job_registry\n finished_job_registry.add(self, result_ttl, pipeline)", "def test_successful_job(self):\n\n successful_job = json.loads(TREEHERDER_JOB % (\"success\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def createMergeJob(self, mergeFiles):\n if self.currentGroup == None:\n self.newGroup()\n\n self.newJob(name = self.getJobName())\n mergeFiles.sort(key=cmp_to_key(fileCompare))\n\n jobSize = 0\n largestFile = 0\n for mergeFile in mergeFiles:\n\n jobSize += mergeFile[\"file_size\"]\n largestFile = max(largestFile, mergeFile[\"file_size\"])\n\n newFile = File(id = mergeFile[\"file_id\"],\n lfn = mergeFile[\"file_lfn\"],\n events = mergeFile[\"file_events\"])\n\n # The WMBS data structure puts locations that are passed in through\n # the constructor in the \"newlocations\" attribute. We want these to\n # be in the \"locations\" attribute so that they get picked up by the\n # job submitter.\n newFile[\"locations\"] = set([mergeFile[\"pnn\"]])\n newFile.addRun(Run(mergeFile[\"file_run\"], mergeFile[\"file_lumi\"]))\n self.currentJob.addFile(newFile)\n\n # job time based on\n # - 5 min initialization\n # - 5MB/s merge speed\n # - checksum calculation at 5MB/s (twice)\n # - stageout at 5MB/s\n # job disk based on\n # - input for largest file on local disk\n # - output on local disk (factor 1)\n jobTime = 300 + (jobSize*4)/5000000\n self.currentJob.addResourceEstimates(jobTime = jobTime, disk = (jobSize+largestFile)/1024)\n\n return", "def job_step_complete(self, job_request_payload):\n if job_request_payload.success_command == JobCommands.STORE_JOB_OUTPUT_COMPLETE:\n raise ValueError(\"Programmer error use use job_step_store_output_complete instead.\")\n payload = JobStepCompletePayload(job_request_payload)\n self.send(job_request_payload.success_command, payload)", "def is_merged(self):\r\n url = '{0}/merge'.format(self.get_url())\r\n\r\n return http.Request('GET', url), resource.parse_boolean", "def complete(self):\n return self._properties.get(\"jobComplete\")", "def complete_job(self, job, token, status, error, res, context=None):\n return self._client.call_method(\n 'UserAndJobState.complete_job',\n [job, token, status, error, res], self._service_ver, context)", "def up(job, inputFileID1, inputFileID2, memory=sortMemory):\n with job.fileStore.writeGlobalFileStream() as (fileHandle, outputFileStoreID):\n with job.fileStore.readGlobalFileStream( inputFileID1 ) as inputFileHandle1:\n with job.fileStore.readGlobalFileStream( inputFileID2 ) as inputFileHandle2:\n merge(inputFileHandle1, inputFileHandle2, fileHandle)\n job.fileStore.logToMaster( \"Merging %s and %s to %s\"\n % (inputFileID1, inputFileID2, outputFileStoreID) )\n #Cleanup up the input files - these deletes will occur after the completion is successful. 
\n job.fileStore.deleteGlobalFile(inputFileID1)\n job.fileStore.deleteGlobalFile(inputFileID2)\n return outputFileStoreID", "def check_job_status(job):\n assert isinstance(job, PreprocessJob),\\\n 'job must be a PreprocessJob'\n\n if job.is_finished():\n return True\n\n return True\n \"\"\"\n ye_task = AsyncResult(job.task_id,\n app=preprocess_csv_file)\n\n if ye_task.state == 'SUCCESS':\n\n if ye_task.result['success']:\n\n preprocess_data = ContentFile(json.dumps(ye_task.result['data']))\n\n new_name = 'preprocess_%s.json' % get_alphanumeric_lowercase(8)\n job.metadata_file.save(new_name,\n preprocess_data)\n job.set_state_success()\n\n job.user_message = 'Task completed! Preprocess is available'\n job.save()\n\n else:\n # Didn't work so well\n job.set_state_failure(ye_task.result['message'])\n job.save()\n\n ye_task.forget()\n return True\n\n elif ye_task.state == STATE_FAILURE:\n job.set_state_failure('ye_task failed....')\n job.save()\n ye_task.forget()\n return True\n\n return False\n \"\"\"", "def run(self, fake=False):\n condor_job_dicts = self.get_running_condor_jobs()\n condor_job_indices = set([int(rj[\"jobnum\"]) for rj in condor_job_dicts])\n\n # main loop over input-output map\n for ins, out in self.io_mapping:\n # force a recheck to see if file exists or not\n # in case we delete it by hand to regenerate\n out.recheck() \n index = out.get_index() # \"merged_ntuple_42.root\" --> 42\n on_condor = index in condor_job_indices\n done = (out.exists() and not on_condor)\n if done:\n self.handle_done_output(out)\n continue\n\n if fake:\n out.set_fake()\n\n if not on_condor:\n # Submit and keep a log of condor_ids for each output file that we've submitted\n succeeded, cluster_id = self.submit_condor_job(ins, out, fake=fake)\n if succeeded:\n if index not in self.job_submission_history: self.job_submission_history[index] = []\n self.job_submission_history[index].append(cluster_id)\n self.logger.info(\"Job for ({0}) submitted to {1}\".format(out, cluster_id))\n\n else:\n this_job_dict = next(rj for rj in condor_job_dicts if int(rj[\"jobnum\"]) == index)\n cluster_id = this_job_dict[\"ClusterId\"]\n\n running = this_job_dict.get(\"JobStatus\",\"I\") == \"R\"\n idle = this_job_dict.get(\"JobStatus\",\"I\") == \"I\"\n held = this_job_dict.get(\"JobStatus\",\"I\") == \"H\"\n hours_since = abs(time.time()-int(this_job_dict[\"EnteredCurrentStatus\"]))/3600.\n\n if running:\n self.logger.debug(\"Job {0} for ({1}) running for {2:.1f} hrs\".format(cluster_id, out, hours_since))\n\n if hours_since > 24.0:\n self.logger.debug(\"Job {0} for ({1}) removed for running for more than a day!\".format(cluster_id, out))\n Utils.condor_rm([cluster_id])\n\n elif idle:\n self.logger.debug(\"Job {0} for ({1}) idle for {2:.1f} hrs\".format(cluster_id, out, hours_since))\n\n elif held:\n self.logger.debug(\"Job {0} for ({1}) held for {2:.1f} hrs with hold reason: {3}\".format(cluster_id, out, hours_since, this_job_dict[\"HoldReason\"]))\n\n if hours_since > 5.0:\n self.logger.info(\"Job {0} for ({1}) removed for excessive hold time\".format(cluster_id, out))\n Utils.condor_rm([cluster_id])", "def __is_complete__(self,configs,*args,**kwargs):\n current_dir = self.output_dir\n if GenericProcess.__is_complete__(self,*args,**kwargs):\n return True\n elif not os.path.isfile(self.complete_file):\n if hasattr(self,\"upload_dir\"):\n current_dir = self.upload_dir\n if not os.path.isfile(self.complete_file.replace(self.output_dir,self.upload_dir)): #If the output directory has already been cleaned, check the upload 
dir.\n return False\n else: \n return False\n if hasattr(self, \"snp_path\") and not self.snp_path is None and hasattr(self,\"analysis_ready_bam_path\") and not self.analysis_ready_bam_path is None:\n if not os.path.isdir(os.path.dirname(self.snp_path)) or not os.path.dirname(os.path.isfile(self.analysis_ready_bam_path)):\n return False\n if not os.path.isfile(self.snp_path) or not os.path.isfile(self.analysis_ready_bam_path):\n snp_file = False\n bam_file = False\n return False\n if not self.upload_dir is None:\n for file in os.listdir(os.path.join(self.upload_dir,self.description)):\n if file.endswith('.vcf'):\n snp_file = True \n if file.endswith('.bam'):\n bam_file = True \n if not snp_file or not bam_file:\n if configs[\"system\"].get(\"Logging\",\"debug\") is \"True\":\n print \"At least one of the output files is missing for sample \" + str(self.sample_key) + \":\"\n if not os.path.isfile(self.snp_path):\n print \"Missing \"+ self.snp_path\n if not os.path.isfile(self.analysis_ready_bam_path):\n print \"Missing \"+ self.analysis_ready_bam_path\n #os.remove(self.complete_file)\n #template_dir = configs['system'].get('Common_directories','template')\n #qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio_no_postprocess'))\n #self.__fill_template__(qsub_template,os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n #self.__launch__(configs['system'],os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n return False\n else:\n check_file = os.path.join(current_dir,'project-summary.csv')\n #If the process is complete, check to make sure that the check file is created. If not, send email once.\n if not os.path.isfile(check_file) and configs['pipeline'].has_option('Template_files','bcbio_no_postprocess') and current_dir==self.output_dir:\n #subject, body = self.__generate_general_error_text__(config)\n #send_email(subject,body)\n #self.fail_reported = True\n os.remove(self.complete_file)\n template_dir = configs['system'].get('Common_directories','template')\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio_no_postprocess'))\n self.__fill_template__(qsub_template,os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n self.__launch__(configs['system'],os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n return False\n #store_stats_in_db(self)\n self.__finish__(*args,**kwargs)\n return True", "def test_job_addition():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.add_job(STATUS_DIR, 'generation', 'test1')\n status1 = Status(STATUS_DIR).data['generation']['test1']['job_status']\n\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'job_status': 'finished',\n 'additional': 'test'})\n status2 = Status(STATUS_DIR).data['generation']['test1']['job_status']\n\n assert status2 == status1", "def mark(self, job, status='succeeded'):\n pass", "def merge(): #Status: WIP\r\n pass", "def is_merged(self):\n return self.get_data(\"state\") == self.STATE_MERGED", "def merged(self) -> bool:\n return pulumi.get(self, \"merged\")", "def defineMergeJobs(self, mergeableFiles):\n mergeJobFileSize = 0\n mergeJobEvents = 0\n mergeJobFiles = []\n earliestInsert = 999999999999999\n\n mergeableFiles.sort(key=cmp_to_key(fileCompare))\n\n for mergeableFile in mergeableFiles:\n if mergeableFile[\"file_size\"] > self.maxMergeSize or \\\n mergeableFile[\"file_events\"] > self.maxMergeEvents:\n self.createMergeJob([mergeableFile])\n continue\n elif mergeableFile[\"file_size\"] + 
mergeJobFileSize > self.maxMergeSize or \\\n mergeableFile[\"file_events\"] + mergeJobEvents > self.maxMergeEvents:\n if mergeJobFileSize > self.minMergeSize or \\\n self.forceMerge == True or \\\n time.time() - mergeableFile['insert_time'] > self.maxWaitTime:\n self.createMergeJob(mergeJobFiles)\n mergeJobFileSize = 0\n mergeJobEvents = 0\n mergeJobFiles = []\n else:\n continue\n\n mergeJobFiles.append(mergeableFile)\n mergeJobFileSize += mergeableFile[\"file_size\"]\n mergeJobEvents += mergeableFile[\"file_events\"]\n if mergeableFile['insert_time'] < earliestInsert:\n earliestInsert = mergeableFile['insert_time']\n\n if mergeJobFileSize > self.minMergeSize or self.forceMerge == True or \\\n time.time() - earliestInsert > self.maxWaitTime:\n if len(mergeJobFiles) > 0:\n self.createMergeJob(mergeJobFiles)\n\n return", "def save_trans_end_of_job(wcl, jobfiles, putinfo):\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n miscutils.fwdebug_print(\"len(putinfo) = %d\" % len(putinfo))\n\n job2target = 'never'\n if pfwdefs.USE_TARGET_ARCHIVE_OUTPUT in wcl:\n job2target = wcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT].lower()\n job2home = 'never'\n if pfwdefs.USE_HOME_ARCHIVE_OUTPUT in wcl:\n job2home = wcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT].lower()\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"job2target = %s\" % job2target)\n miscutils.fwdebug_print(\"job2home = %s\" % job2home)\n\n if putinfo:\n # if not end of job and transferring at end of job, save file info for later\n if job2target == 'job' or job2home == 'job':\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"Adding %s files to save later\" % len(putinfo))\n jobfiles['output_putinfo'].update(putinfo)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")", "def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e", "def __process_live_merging(self, finished, manager_data, tracker):\n\n if (\n PyFunceble.CONFIGURATION.multiprocess_merging_mode == \"live\"\n and not finished\n and not self.autosave.is_time_exceed()\n ):\n self.__merge_processes_data(manager_data, tracker=tracker)\n\n return True\n\n return False", "def merge(self , station = '' , datasets = ''):\n \n \n \n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! 
***')\n return True\n \n \n \"\"\"\n try:\n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n except:\n print('Failed: ' , station )\n return False \n \"\"\"", "def fire(self):\n if (self.job):\n job = self.job\n try:\n job.run()\n logger.debug(\"Job run. Setting status to done.\")\n self.status = 'done'\n except Exception:\n logger.error(\"Caught exception. Setting status to fail and deleting output.\")\n dfs.delete(self.outputpath)\n self.status = 'fail'", "def __process_end_merging(self, finished, manager_data, tracker):\n\n if finished or self.autosave.is_time_exceed():\n while \"PyF\" in \" \".join([x.name for x in reversed(active_children())]):\n continue\n\n self.__merge_processes_data(manager_data, tracker=tracker)\n\n return True\n return False", "def tellJobDone(self, clipboard=None):\n origid = self.jobclient.getOriginatorId()\n if clipboard:\n if clipboard.has_key(\"originatorId\"):\n origid = clipboard.get(\"originatorId\")\n else:\n self.log.log(Log.WARN, \"OriginatorId not found on clipboard\")\n print \"DEBUG: clipboard keys:\", str(clipboard.keys())\n if len(self.dataclients) > 0:\n self.log.log(Log.INFO-5, \"reporting the completed files\")\n self.tellDataReady(clipboard)\n self.jobclient.tellDone(self.jobsuccess, origid)", "def merge_wrapper(processdir, basedir, starglob, superstarglob, calibrootglob, njobs=2, invert=False):\n for glob in [starglob, superstarglob, calibrootglob]:\n assert path.dirname(glob), \\\n f\"Glob : {glob} should be/contain a subdirectory\"\n\n superstarGlobNew = get_glob_strings(superstarglob)\n calibrootGlob1, calibrootGlob2 = get_glob_strings(calibrootglob)\n superstardir = get_dir_from_glob(processdir, superstarglob)\n calibdir = get_dir_from_glob(basedir, calibrootglob)\n starglob = processdir + starglob\n\n # ssmcolfnames = converter(superstardir,\n # globstr1=superstarGlobNew,\n # globstr2=superstarGlobNew,\n # njobs=42,\n # mergecolsonly=True)\n # yecho(\"SuperStarfiles done.\")\n # tofiltercalibglob = converter(processdir,\n # globstr1=calibrootGlob1,\n # globstr2=calibrootGlob2,\n # njobs=42,\n # mergecolsonly=False)\n # yecho(\"Extracting done.\")\n tofiltercalibglob = \"./csv/*.csv\"\n ssmcolfnames = glob_and_check(\"./superstar/mergecols/*.csv\")\n\n yecho(\"Removing events.\")\n if njobs > 1:\n splitcalib = split_by_dates(tofiltercalibglob)\n splitstar = split_by_dates(starglob)\n splitss = split_by_dates(ssmcolfnames)\n # needs filename output\n assert len(splitcalib) == len(splitstar) == len(splitss), \"only works the first time when no calibfiles got moved, for everything else this needs a new function with more logic\"\n Parallel(n_jobs=njobs)\\\n (delayed(single_remove_events)(calibglob, starglob, ssglob, njobs, invert)\n for calibglob, starglob, ssglob in zip(splitcalib, splitstar, splitss))\n # filteredFiles = [f for arr in filteredFiles for f in arr]\n else:\n check_telescope_files(rootdir=None, globstr1=ssmcolfnames,\n globstr2=calibmcolfnames, replacer=(\"_Y_\", \"_I_\"))\n remover = EventRemover(tofiltercalibglob=tofiltercalibglob,\n starglob=starglob,\n superstarmcolglob=ssmcolfnames)\n remover.remove_events()\n filteredFiles = remover.outfilenames\n yecho(\"Removed events that get thrown out during image cleaning and superstar processing and wrote the merged runs 
to:\")\n yecho(f\"{path.basename(filteredFiles[0])}\")\n # return filteredFiles", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def collect(self,outfilename):\n # TODO actually gather results and check if run is successful\n if os.path.isfile(outfilename):\n self.completed=True\n else:\n self.completed=False", "def merge(self , station = '' , datasets = '' , mode = 'test'):\n\n if mode == \"test\": \n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n #logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n #a = self.write_merged_file()\n #logging.info('*** Done writing the output ! ***')\n return True\n\n else:\n o = open(\"FAILED_MERGING_LIST.txt\", 'a+') \n try:\n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n #logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n #a = self.write_merged_file()\n #logging.info('*** Done writing the output ! ***')\n return True \n except MemoryError:\n print('Failed: ' , station )\n o.write(station + '\\n' )\n return False", "def maybe_commit(job):", "def addMergeVCFReplicateHaplotypesJobs(self, workflow, inputData=None, db_vervet=None, transferOutput=True,\\\n\t\t\t\t\t\tmaxContigID=None, outputDirPrefix=\"\",replicateIndividualTag='copy', refFastaFList=None ):\n\t\tsys.stderr.write(\"Adding MergeVCFReplicateHaplotype jobs for %s vcf files ... 
\"%(len(inputData.jobDataLs)))\n\t\tno_of_jobs= 0\n\t\t\n\t\t\n\t\ttopOutputDir = \"%sMergeVCFReplicateHaplotypeStat\"%(outputDirPrefix)\n\t\ttopOutputDirJob = self.addMkDirJob(outputDir=topOutputDir)\n\t\tno_of_jobs += 1\n\t\t\n\t\t\n\t\thaplotypeDistanceMergeFile = File(os.path.join(topOutputDir, 'haplotypeDistanceMerge.tsv'))\n\t\thaplotypeDistanceMergeJob = self.addStatMergeJob(workflow, statMergeProgram=workflow.mergeSameHeaderTablesIntoOne, \\\n\t\t\t\t\t\t\toutputF=haplotypeDistanceMergeFile, transferOutput=False, parentJobLs=[topOutputDirJob])\n\t\tmajoritySupportMergeFile = File(os.path.join(topOutputDir, 'majoritySupportMerge.tsv'))\n\t\tmajoritySupportMergeJob = self.addStatMergeJob(workflow, statMergeProgram=workflow.mergeSameHeaderTablesIntoOne, \\\n\t\t\t\t\t\t\toutputF=majoritySupportMergeFile, transferOutput=False, parentJobLs=[topOutputDirJob])\n\t\tno_of_jobs += 2\n\t\t\n\t\treturnData = PassingData()\n\t\treturnData.jobDataLs = []\n\t\tfor jobData in inputData.jobDataLs:\n\t\t\tinputF = jobData.vcfFile\n\t\t\t\n\t\t\tinputFBaseName = os.path.basename(inputF.name)\n\t\t\tcommonPrefix = inputFBaseName.split('.')[0]\n\t\t\toutputVCF = File(os.path.join(topOutputDir, '%s.vcf'%(commonPrefix)))\n\t\t\tdebugHaplotypeDistanceFile = File(os.path.join(topOutputDir, '%s.haplotypeDistance.tsv'%(commonPrefix)))\n\t\t\tdebugMajoritySupportFile = File(os.path.join(topOutputDir, '%s.majoritySupport.tsv'%(commonPrefix)))\n\t\t\t#2012.4.2\n\t\t\tfileSize = utils.getFileOrFolderSize(yh_pegasus.getAbsPathOutOfFile(inputF))\n\t\t\tmemoryRequest = 45000\n\t\t\tmemoryRequest = min(42000, max(4000, int(38000*(fileSize/950452059.0))) )\n\t\t\t\t#extrapolates (33,000Mb memory for a ungzipped VCF file with size=950,452,059)\n\t\t\t\t#upper bound is 42g. lower bound is 4g.\n\t\t\t#mergeReplicateOutputF = File(os.path.join(trioCallerOutputDirJob.folder, '%s.noReplicate.vcf'%vcfBaseFname))\n\t\t\t#noOfAlignments= len(alignmentDataLs)\n\t\t\t#entireLength = stopPos - startPos + 1\t#could be very small for shorter reference contigs\n\t\t\t#memoryRequest = min(42000, max(4000, int(20000*(noOfAlignments/323.0)*(entireLength/2600000.0))) )\n\t\t\t\t#extrapolates (20000Mb memory for a 323-sample + 2.6Mbase reference length/26K loci)\n\t\t\t\t#upper bound is 42g. 
lower bound is 4g.\n\t\t\tmergeVCFReplicateColumnsJob = self.addMergeVCFReplicateGenotypeColumnsJob(workflow, \\\n\t\t\t\t\t\t\t\texecutable=workflow.MergeVCFReplicateHaplotypesJava,\\\n\t\t\t\t\t\t\t\tGenomeAnalysisTKJar=workflow.GenomeAnalysisTKJar, \\\n\t\t\t\t\t\t\t\tinputF=inputF, outputF=outputVCF, \\\n\t\t\t\t\t\t\t\treplicateIndividualTag=replicateIndividualTag, \\\n\t\t\t\t\t\t\t\trefFastaFList=refFastaFList, \\\n\t\t\t\t\t\t\t\tdebugHaplotypeDistanceFile=debugHaplotypeDistanceFile, \\\n\t\t\t\t\t\t\t\tdebugMajoritySupportFile=debugMajoritySupportFile,\\\n\t\t\t\t\t\t\t\tparentJobLs=[topOutputDirJob]+jobData.jobLs, \\\n\t\t\t\t\t\t\t\textraDependentInputLs=[], transferOutput=False, \\\n\t\t\t\t\t\t\t\textraArguments=None, job_max_memory=memoryRequest)\n\t\t\t\n\t\t\t#add output to some reduce job\n\t\t\tself.addInputToStatMergeJob(statMergeJob=haplotypeDistanceMergeJob, \\\n\t\t\t\t\t\t\t\tinputF=mergeVCFReplicateColumnsJob.outputLs[1] , \\\n\t\t\t\t\t\t\t\tparentJobLs=[mergeVCFReplicateColumnsJob])\n\t\t\tself.addInputToStatMergeJob(statMergeJob=majoritySupportMergeJob, \\\n\t\t\t\t\t\t\t\tinputF=mergeVCFReplicateColumnsJob.outputLs[2] , \\\n\t\t\t\t\t\t\t\tparentJobLs=[mergeVCFReplicateColumnsJob])\n\t\t\tno_of_jobs += 1\n\t\tsys.stderr.write(\"%s jobs. Done.\\n\"%(no_of_jobs))\n\t\t\n\t\treturnData.jobDataLs.append(PassingData(jobLs=[haplotypeDistanceMergeJob], file=haplotypeDistanceMergeFile, \\\n\t\t\t\t\t\t\t\t\t\t\tfileLs=[haplotypeDistanceMergeFile]))\n\t\treturnData.jobDataLs.append(PassingData(jobLs=[majoritySupportMergeJob], file=majoritySupportMergeFile, \\\n\t\t\t\t\t\t\t\t\t\t\tfileLs=[majoritySupportMergeFile]))\n\t\t#2012.7.21 gzip the final output\n\t\tnewReturnData = self.addGzipSubWorkflow(workflow=workflow, inputData=returnData, transferOutput=transferOutput,\\\n\t\t\t\t\t\toutputDirPrefix=\"\")\n\t\treturn newReturnData", "def change_merged(self, event):\n pass", "def markJobDone(self, job, success=True):\n with self:\n with self.queues.jobsInProgress:\n with self.queues.jobsDone:\n try:\n index = self.queues.jobsInProgress.index(job)\n except ValueError, ex:\n raise BlackboardUpdateError(\"Job not found in jobsInProgress: \" +\n job.getProperty(Props.NAME, \"(unidentified)\"))\n job = self.queues.jobsInProgress.pop(index)\n job.markSuccessful(success)\n self.queues.jobsDone.append(job)", "def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n 
s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. 
Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True", "def CheckIfJobFinished(jobid, numseq, to_email, g_params): # {{{\n bsname = \"job_final_process\"\n path_result = os.path.join(g_params['path_static'], 'result')\n rstdir = os.path.join(path_result, jobid)\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n g_params['jobid'] = jobid\n g_params['numseq'] = numseq\n g_params['to_email'] = to_email\n jsonfile = os.path.join(rstdir, f\"{bsname}.json\")\n myfunc.WriteFile(json.dumps(g_params, sort_keys=True), jsonfile, \"w\")\n binpath_script = os.path.join(g_params['webserver_root'], \"env\", \"bin\")\n\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n failed_idx_file = \"%s/failed_seqindex.txt\"%(rstdir)\n py_scriptfile = os.path.join(binpath_script, f\"{bsname}.py\")\n finished_idx_list = []\n failed_idx_list = []\n if os.path.exists(finished_idx_file):\n finished_idx_list = list(set(myfunc.ReadIDList(finished_idx_file)))\n if os.path.exists(failed_idx_file):\n failed_idx_list = list(set(myfunc.ReadIDList(failed_idx_file)))\n\n lockname = f\"{bsname}.lock\"\n lock_file = os.path.join(g_params['path_result'], g_params['jobid'],\n lockname)\n\n num_processed = len(finished_idx_list)+len(failed_idx_list)\n if num_processed >= numseq: # finished\n if ('THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH' in g_params\n and numseq <= g_params['THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH']):\n cmd = [\"python\", py_scriptfile, \"-i\", jsonfile]\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n elif not os.path.exists(lock_file):\n bash_scriptfile = f\"{rstdir}/{bsname},{name_server},{jobid}.sh\"\n code_str_list = []\n code_str_list.append(\"#!/bin/bash\")\n cmdline = f\"python {py_scriptfile} -i {jsonfile}\"\n code_str_list.append(cmdline)\n code = \"\\n\".join(code_str_list)\n myfunc.WriteFile(code, bash_scriptfile, mode=\"w\", isFlush=True)\n os.chmod(bash_scriptfile, 0o755)\n os.chdir(rstdir)\n cmd = ['sbatch', bash_scriptfile]\n cmdline = \" \".join(cmd)\n verbose = False\n if 'DEBUG' in g_params and g_params['DEBUG']:\n verbose = True\n webcom.loginfo(\"Run cmdline: %s\"%(cmdline), gen_logfile)\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile, verbose)\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"isSubmitSuccess: %s\"%(str(isSubmitSuccess)), gen_logfile)", "def merge(self, sample):\n # Set the assembly file to 'NA' as assembly is not desirable for metagenomes\n sample.general.assemblyfile = 'NA'\n # Can only merge paired-end\n if len(sample.general.fastqfiles) == 2:\n outpath = os.path.join(sample.general.outputdirectory, 'merged_reads')\n make_path(outpath)\n # Merge path - keep 
all the merged FASTQ files in one directory\n merge_path = os.path.join(self.path, 'merged_reads')\n make_path(merge_path)\n # Set the name of the merged, and unmerged files\n sample.general.mergedreads = \\\n os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name))\n log = os.path.join(outpath, 'log')\n error = os.path.join(outpath, 'err')\n try:\n if not os.path.isfile(sample.general.mergedreads):\n # Run the merging command\n out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],\n merged_reads=sample.general.mergedreads,\n mix=True,\n returncmd=True,\n threads=self.cpus)\n write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)\n with open(log, 'w') as log_file:\n log_file.write(out)\n with open(error, 'w') as error_file:\n error_file.write(err)\n except (CalledProcessError, IndexError):\n delattr(sample.general, 'mergedreads')\n # Set the name of the report to store the metagenome file merging results\n report = os.path.join(self.reportpath, 'merged_metagenomes.csv')\n # Extract the total number of reads, and the number of reads that could be paired from the bbmerge\n # err stream\n num_reads, num_pairs = self.reads(error)\n # If the report doesn't exist, create it with the header and the results from the first sample\n if not os.path.isfile(report):\n with open(report, 'w') as report_file:\n report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n'\n .format(sample=sample.name,\n total=num_reads,\n paired=num_pairs))\n # If the report exists, open it to determine which samples have already been added - useful if re-running\n # the analysis\n else:\n lines = list()\n with open(report, 'r') as report_file:\n for line in report_file:\n lines.append(line.split(',')[0])\n # Add the results to the report\n if sample.name not in lines:\n with open(report, 'a+') as report_file:\n report_file.write('{sample},{total},{paired}\\n'\n .format(sample=sample.name,\n total=num_reads,\n paired=num_pairs))", "def status_of_old_dump_is_done(self, runner, date, job_name, job_desc):\n old_dump_runinfo_filename = self._get_dump_runinfo_filename(date)\n status = self._get_status_from_runinfo(old_dump_runinfo_filename, job_name)\n if status == \"done\":\n return 1\n if status is not None:\n # failure, in progress, some other useless thing\n return 0\n\n # ok, there was no info there to be had, try the index file. 
yuck.\n index_filename = os.path.join(runner.wiki.public_dir(),\n date, runner.wiki.config.perdump_index)\n status = self._get_status_from_html(index_filename, job_desc)\n if status == \"done\":\n return 1\n return 0", "def _add_job_data(\n self,\n job: Job,\n ) -> Tuple[str, bool]:\n jid = job.job_id()\n try:\n job_result = job.result()\n self._add_result_data(job_result, jid)\n LOG.debug(\"Job data added [Job ID: %s]\", jid)\n # sets the endtime to be the time the last successful job was added\n self.end_datetime = datetime.now()\n return jid, True\n except Exception as ex: # pylint: disable=broad-except\n # Handle cancelled jobs\n status = job.status()\n if status == JobStatus.CANCELLED:\n LOG.warning(\"Job was cancelled before completion [Job ID: %s]\", jid)\n return jid, False\n if status == JobStatus.ERROR:\n LOG.error(\n \"Job data not added for errored job [Job ID: %s]\\nError message: %s\",\n jid,\n job.error_message(),\n )\n return jid, False\n LOG.warning(\"Adding data from job failed [Job ID: %s]\", job.job_id())\n raise ex", "def is_job_complete(self, job_id):\n\n job_status = self.get_job_progress(job_id)\n complete = job_status['completed']\n total = job_status['total']\n if (complete == total):\n return True\n else:\n return False", "def detect_completion(self):\n results_dir = glob.glob(f\"{self.production.rundir}\")\n if len(results_dir)>0: # dynesty_merge_result.json\n if len(glob.glob(os.path.join(results_dir[0], f\"extrinsic_posterior_samples.dat\"))) > 0:\n return True\n else:\n return False\n else:\n return False", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def on_job_result(self, job, result):\n job_id = job.tag\n parities = result.histogram(key='out',\n fold_func=lambda bits: np.sum(bits) % 2)\n self._zeros[job_id] += parities[0]\n self._ones[job_id] += parities[1]", "def _job_was_successful(self, status):\n success = True\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event\n for event in status[\"metadata\"][\"events\"]:\n\n logger.debug(event[\"description\"])\n\n # Does it always result in fail for other failure reasons?\n if \"failed\" in event:\n success = False\n action = event.get(\"failed\")\n logger.debug(\"{}: {}\".format(action[\"code\"], action[\"cause\"]))\n\n elif \"unexpectedExitStatus\" in event:\n action = event.get(\"unexpectedExitStatus\")\n\n if action[\"exitStatus\"] != 0:\n success = False\n\n # Provide reason for the failure (desc includes exit code)\n msg = \"%s\" % event[\"description\"]\n if \"stderr\" in action:\n msg += \": %s\" % action[\"stderr\"]\n logger.debug(msg)\n\n return success", "def statusJob(self, job):\n with self.thread_lock:\n name = job.name\n job_container = self.shared_dags[job]\n job_dag = job_container.getDAG()\n\n # If there is no timing, then the job is not finished\n if 
job_container.getTime():\n job_container.addCaveat('time: ' + job_container.getTime())\n if job.getResult() == False:\n self.active.remove(job)\n self.killJobs()\n return\n else:\n self.job_queue_count -= 1\n job_dag.delete_node(job)\n self.active.remove(job)\n if self.args.download_only:\n result = ' -Downloaded | '\n else:\n result = ' --Finished | '\n\n else:\n result = ' Launching | '\n\n # Format job name length field\n name_cnt = (self.term_width - len(job.name)) + 2 # 2 character buffer\n result = strftime(\"%H:%M\") + result + job.name + ' '*name_cnt\n\n # Format caveat length\n caveats = job_container.getCaveats()\n caveat_cnt = self.max_caveat_length - len(caveats)\n\n if caveats:\n result = result + caveats + ' '*caveat_cnt\n else:\n result = result + ' '*caveat_cnt\n\n remaining = job_dag.size()\n print(result, \"remaining: %-3d active: %-2d\" % (remaining, len(self.active)), [x.name for x in self.active])", "def process_job(q):\n del log_msg[:]\n logger.info('Processing Job %s', q.id)\n\n datatype = q.datatype\n input_dir = q.input_dir\n output_dir = q.output_dir\n processor = q.processor\n if datatype.lower() == 'laz':\n block_name = proper_block_name(input_dir)\n elif datatype.lower() == 'ortho':\n block_name = proper_block_name_ortho(input_dir)\n if datatype.lower() == 'laz' or datatype.lower() == 'ortho':\n logger.info('Verifying las tiles in directory...')\n log_msg.append('Verifying las tiles in directory...\\n')\n has_error, remarks = verify_dir(input_dir, datatype.lower())\n\n if has_error:\n assign_status(q, error=True)\n log_msg.append('Error in verify_las/verify_raster!\\n {0} \\n'.format(remarks))\n else:\n logger.info('Renaming tiles...')\n\n logger.info('BLOCK NAME %s', block_name)\n log_msg.append('BLOCK NAME {0}\\n'.format(block_name))\n\n in_coverage, block_uid = find_in_coverage(block_name)\n\n #: Check first if folder or `block_name` is in `Cephgeo_LidarCoverageBlock`\n #: If not found, `output_dir` is not created and data is not processed\n if in_coverage:\n logger.info('Found in Lidar Coverage model %s %s',\n block_name, block_uid)\n log_msg.append('Found in Lidar Coverage model {0} {1}\\n'.format(\n block_name, block_uid))\n\n rename_tiles(input_dir, output_dir, processor,\n block_name, block_uid, q)\n logger.info('Status %s Status Timestamp %s',\n q.status, q.status_timestamp)\n log_msg.append('Status {0} Status Timestamp {1}\\n'.format(\n q.status, q.status_timestamp))\n\n else:\n has_error = True\n logger.error('ERROR NOT FOUND IN MODEL %s %s', block_name, block_uid)\n log_msg.append('ERROR NOT FOUND IN MODEL {0} {1}\\n'.format(block_name, block_uid))\n assign_status(q, error=True)\n # for DEM\n else:\n logger.info('Handler not implemented for type: %s',\n str(q.datatype))\n log_msg.append('Handler not implemented for type: {0}\\n'.format(\n str(q.datatype)))\n assign_status(q, error=True)\n\n paragraph = ''\n for par in log_msg:\n paragraph = paragraph + par\n\n #: Save log messages from renaming tiles to `Automation_AutomationJob.log`\n with PSQL_DB.atomic() as txn:\n new_q = (Automation_AutomationJob\n .update(data_processing_log=paragraph, status_timestamp=datetime.now())\n .where(Automation_AutomationJob.id == q.id))\n new_q.execute()", "def _success_finish(self):\n # run this task after scrapy process successfully finished\n # cache result, if there is at least one scraped item\n time.sleep(2) # let the data to be dumped into the output file?\n self._update_items_scraped()\n if self.items_scraped:\n self.save_cached_result()\n else:\n 
logger.warning('Not caching result for task %s (%s) '\n 'due to no scraped items.',\n self.task_data.get('task_id'),\n self.task_data.get('server_name'))\n logger.info('Success finish task #%s', self.task_data.get('task_id', 0))\n self.finished_ok = True", "def indicate_success(self):\n pass", "def completed_file(self, context):", "def merge():\n click.echo(\"Not implemented yet. In the future, this command will be used for merging models.\")\n sys.exit(-2)", "def test_successful(self):\n\n url = '/%s/job-types/status/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n self.assertEqual(result['results'][0]['job_counts'][0]['count'], 1)", "def result(self, result: osbuild.pipeline.BuildResult):", "def success(self):\n self.succeeded = True", "def was_successful(self):\n return self._build_proto.status == common.SUCCESS", "def test_all_merge(self):\n\n test_folder = os.path.join('test_data', 'merging_tests', 'batch_test')\n # test_folder = base_path + '/test_data/merging_tests/batch_test/'\n results_folder = os.path.join(test_folder, 'results')\n # results_folder = test_folder+\"results/\"\n\n if not os.path.isdir(results_folder):\n os.mkdir(results_folder)\n\n # delete all files in output folder\n for the_file in os.listdir(results_folder):\n file_path = os.path.join(results_folder, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n backgrounds_folder = os.path.join(test_folder, 'backgrounds')\n obj_poses_folder = os.path.join(test_folder, 'object_poses')\n\n mi.generate_for_all_objects(obj_poses_folder, backgrounds_folder, results_folder, adjust_brightness = True)\n self.assertEqual(len(os.listdir(obj_poses_folder)), len(os.listdir(results_folder)))\n\n for the_file in os.listdir(results_folder):\n file_path = os.path.join(results_folder, the_file)\n im = Image.open(file_path)\n self.assertEqual((300,300), im.size)\n self.assertEqual('JPEG', im.format)\n self.assertNotEqual('PNG', im.format)", "def has_merge(self) -> Optional[str]:\n return None", "def merge_job_info(run, seqno, slices):\n inset = {\"job_info\": [\"workscript.stdout\", \"workscript.stderr\"],\n }\n outset = {\"job_info\": [\"std_{0:06d}_{1:03d}.out\", \"std_{0:06d}_{1:03d}.err\"],\n }\n tarset = {\"job_info\": \"job_info_{0:06d}_{1:03d}.tgz\",\n }\n badslices = []\n slicepatt = re.compile(r\"([1-9][0-9]*),([1-9][0-9]*)/\")\n for iset in inset:\n outlist = []\n for i in range(0, len(inset[iset])):\n ofile = outset[iset][i].format(run, seqno)\n with open(ofile, \"w\") as ostr:\n for sl in slices:\n ifile = \"{0},{1}/\".format(sl[0], sl[1]) + inset[iset][i]\n for lines in open(ifile):\n ostr.write(lines)\n outlist.append(ofile)\n tarfile = tarset[iset].format(run, seqno)\n cmd = subprocess.Popen([\"tar\", \"zcf\", tarfile] + outlist,\n stderr=subprocess.PIPE)\n elog = cmd.communicate()\n if cmd.returncode != 0:\n for eline in elog[1].decode(\"ascii\").split('\\n'):\n badslice = slicepatt.search(eline)\n if badslice:\n badslices.append(\"{0},{1}\".format(badslice.group(1),\n badslice.group(2)))\n sys.stderr.write(eline + '\\n')\n sys.stderr.write(\"Error on output file {0}\".format(tarfile) +\n \" - job logs tarballing failed!\\n\")\n sys.stderr.flush()\n continue\n odir = output_area + \"/\" + iset + \"/{0:06d}\".format(run)\n upload(tarfile, odir)\n 
return badslices", "def generateFinishOutput(self, job):\n return []", "def _check_step_completed(self, i):\n\n module, _ = self._get_command_config(i)\n status = self._get_status_obj()\n submitted = self._check_jobs_submitted(status, module)\n if not submitted:\n return_code = 1\n else:\n return_code = self._get_module_return_code(status, module)\n\n return return_code", "def completeMerge(self):\n #--Remove lists that aren't the sum of at least two esps.\n srcMods = self.srcMods\n for levls in (self.levcs,self.levis):\n for listId in levls.keys():\n if len(srcMods[listId]) < 2 or levls[listId].isDeleted:\n self.records.remove(levls[listId])\n del levls[listId]\n del srcMods[listId]\n #--Log\n log = self.log\n for label, levls in (('Creature',self.levcs), ('Item',self.levis)):\n if not len(levls): continue\n log.setHeader(_('Merged %s Lists:') % (label,))\n for listId in sorted(levls.keys(),key=lambda a: a.lower() ):\n log(listId)\n for mod in srcMods[listId]:\n log(' '+mod)", "def test_job_output(self):\n self.run_generator_job()\n job = CountSkillCompletion(self.app_context).load()\n output = jobs.MapReduceJob.get_results(job)\n expected = [[str(self.skill1.id), 3, 0],\n [str(self.skill2.id), 1, 1],\n [str(self.skill3.id), 0, 0]]\n self.assertEqual(sorted(expected), sorted(output))\n\n template_values = {}\n SkillMapDataSource.fill_values(self.app_context, template_values, job)\n self.assertEqual(transforms.loads(template_values['counts']), [\n [self.skill1.name, 3, 0],\n [self.skill2.name, 1, 1],\n [self.skill3.name, 0, 0]])", "def _process_finished(self, process):\n self._state = JobState.FINISHED", "def block_until_ready(self) -> GeneratorResult:\n logger.info(\"Waiting for jobs to finish.\")\n if isinstance(self.job, ManagedJobSet):\n js_results = self.job.results()\n job_result = js_results.combine_results()\n else:\n job_result = self.job.result()\n logger.info(\"All jobs finished, transforming job results.\")\n\n self.raw_bits_list = self._ibmq_result_transform(job_result)\n self._format_wsr()\n if self.saved_fn:\n try:\n os.remove(self.saved_fn)\n except Exception: # pylint: disable=broad-except\n logger.warning(\"Unable to delete file %s\", self.saved_fn)\n return GeneratorResult(wsr=self.formatted_wsr, raw_bits_list=self.raw_bits_list,\n backend=self.backend)", "def _finalize_job(self, job: Job) -> None:\n self._jobs.remove(job)\n self._finalized_jobs[job.task_name][job.status] += 1", "def new_result(self, job, update_model=True):\n\t\tif not job.exception is None:\n\t\t\tself.logger.warning(\"job {} failed with exception\\n{}\".format(job.id, job.exception))", "def test_successful(self):\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n for entry in result['results']:\n expected = None\n if entry['id'] == self.job1.id:\n expected = self.job1\n elif entry['id'] == self.job2.id:\n expected = self.job2\n elif entry['id'] == self.job3.id:\n expected = self.job3\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n self.assertEqual(entry['job_type']['name'], expected.job_type.name)\n self.assertEqual(entry['job_type_rev']['job_type']['id'], expected.job_type.id)\n self.assertEqual(entry['is_superseded'], expected.is_superseded)", "def is_finished(self):\n if self.status.finished:\n return True\n if len(self._job_name_lst) > 0:\n return False\n return 
self.check_all_childs_finished()", "def mark_import_job_complete(self, report_id: int,\n job: bigquery.LoadJob) -> None:\n self.delete_document(Type._JOBS, report_id)\n self.store_document(Type._COMPLETED, report_id, job.to_api_repr())", "def end_job(self, job: 'JobAdapter',\n label: str,\n job_name: str,\n ) -> bool:\n if job.job_status[0] != 'done' or job.job_status[1]['status'] != 'done':\n try:\n job.determine_job_status() # Also downloads the output file.\n except IOError:\n if job.job_type not in ['orbitals']:\n logger.warning(f'Tried to determine status of job {job.job_name}, '\n f'but it seems like the job never ran. Re-running job.')\n self._run_a_job(job=job, label=label)\n if job_name in self.running_jobs[label]:\n self.running_jobs[label].pop(self.running_jobs[label].index(job_name))\n\n if job.job_status[1]['status'] == 'errored' and job.job_status[1]['keywords'] == ['memory']:\n original_mem = job.job_memory_gb\n if 'insufficient job memory' in job.job_status[1]['error'].lower():\n job.job_memory_gb *= 3\n logger.warning(f'Job {job.job_name} errored because of insufficient memory. '\n f'Was {original_mem} GB, rerunning job with {job.job_memory_gb} GB.')\n self._run_a_job(job=job, label=label)\n elif 'memory requested is too high' in job.job_status[1]['error'].lower():\n used_mem = None\n if 'used only' in job.job_status[1]['error']:\n used_mem = int(job.job_status[1]['error'][-2])\n logger.warning(f'Job {job.job_name} errored because the requested memory is too high. '\n f'Was {original_mem} GB, rerunning job with {job.job_memory_gb} GB.')\n job.job_memory_gb = used_mem * 4.5 if used_mem is not None else job.job_memory_gb * 0.5\n self._run_a_job(job=job, label=label)\n\n if not os.path.isfile(job.local_path_to_output_file) and not job.execution_type == 'incore':\n job.rename_output_file()\n if not os.path.isfile(job.local_path_to_output_file) and not job.execution_type == 'incore':\n if 'restart_due_to_file_not_found' in job.ess_trsh_methods:\n job.job_status[0] = 'errored'\n job.job_status[1]['status'] = 'errored'\n logger.warning(f'Job {job.job_name} errored because for the second time ARC did not find the output '\n f'file path {job.local_path_to_output_file}.')\n elif job.job_type not in ['orbitals']:\n job.ess_trsh_methods.append('restart_due_to_file_not_found')\n logger.warning(f'Did not find the output file of job {job.job_name} with path '\n f'{job.local_path_to_output_file}. Maybe the job never ran. 
Re-running job.')\n self._run_a_job(job=job, label=label)\n if job_name in self.running_jobs[label]:\n self.running_jobs[label].pop(self.running_jobs[label].index(job_name))\n return False\n\n if job.job_status[0] != 'running' and job.job_status[1]['status'] != 'running':\n if job_name in self.running_jobs[label]:\n self.running_jobs[label].pop(self.running_jobs[label].index(job_name))\n self.timer = False\n job.write_completed_job_to_csv_file()\n logger.info(f' Ending job {job_name} for {label} (run time: {job.run_time})')\n if job.job_status[0] != 'done':\n return False\n if job.job_adapter in ['gaussian', 'terachem'] and os.path.isfile(os.path.join(job.local_path, 'check.chk')) \\\n and job.job_type in ['opt', 'optfreq', 'composite']:\n check_path = os.path.join(job.local_path, 'check.chk')\n if os.path.isfile(check_path):\n if 'directed_scan' in job.job_name and 'cont' in job.directed_scan_type:\n folder_name = 'rxns' if job.is_ts else 'Species'\n r_path = os.path.join(self.project_directory, 'output', folder_name, job.species_label, 'rotors')\n if not os.path.isdir(r_path):\n os.makedirs(r_path)\n shutil.copyfile(src=check_path, dst=os.path.join(r_path, 'directed_rotor_check.chk'))\n self.species_dict[label].checkfile = os.path.join(r_path, 'directed_rotor_check.chk')\n else:\n self.species_dict[label].checkfile = check_path\n if job.job_type == 'scan' or job.directed_scan_type == 'ess':\n for rotors_dict in self.species_dict[label].rotors_dict.values():\n if rotors_dict['pivots'] in [job.pivots, job.pivots[0]]:\n rotors_dict['scan_path'] = job.local_path_to_output_file\n self.save_restart_dict()\n return True", "def testMarkTaskAsMerging(self):\n redis_client = self._CreateRedisClient()\n\n session = sessions.Session()\n task = tasks.Task(session_identifier=session.identifier)\n\n # Trying to mark a task as merging without finalizing it raises an error.\n with self.assertRaises(IOError):\n redis_store.RedisStore.MarkTaskAsMerging(\n task.identifier, session.identifier, redis_client=redis_client)\n\n # Opening and closing a writer for a task should cause the task to be marked\n # as complete.\n storage_writer = writer.RedisStorageWriter(\n storage_type=definitions.STORAGE_TYPE_TASK)\n storage_writer.Open(\n redis_client=redis_client, session_identifier=task.session_identifier,\n task_identifier=task.identifier)\n storage_writer.Close()\n\n redis_store.RedisStore.MarkTaskAsMerging(\n task.identifier, session.identifier, redis_client=redis_client)", "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'", "def test_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response 
= self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def job_status(self) -> JobStatus:\n statuses = set()\n with self._jobs.lock:\n\n # No jobs present\n if not self._jobs:\n return JobStatus.DONE\n\n statuses = set()\n for job in self._jobs.values():\n if job:\n statuses.add(job.status())\n\n # If any jobs are in non-DONE state return that state\n for stat in [\n JobStatus.ERROR,\n JobStatus.CANCELLED,\n JobStatus.RUNNING,\n JobStatus.QUEUED,\n JobStatus.VALIDATING,\n JobStatus.INITIALIZING,\n ]:\n if stat in statuses:\n return stat\n\n return JobStatus.DONE", "def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,\n out_csv_filename, from_gbif=True):\n csv_filename_pairs, header = get_chunk_files(\n in_csv_filename, out_csv_filename=out_csv_filename)\n\n# in_csv_fn, out_csv_fn = csv_filename_pairs[0]\n# intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,\n# marine_data, ancillary_path, out_csv_fn, False)\n\n with ProcessPoolExecutor() as executor:\n for in_csv_fn, out_csv_fn in csv_filename_pairs:\n executor.submit(\n intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,\n marine_data, ancillary_path, out_csv_fn, from_gbif)\n\n try:\n outf = open(out_csv_filename, 'w', encoding='utf-8')\n outf.write('{}'.format(header))\n smfile_linecount = 0\n for _, small_csv_fn in csv_filename_pairs:\n curr_linecount = get_line_count(small_csv_fn) - 1\n print('Appending {} records from {}'.format(\n curr_linecount, small_csv_fn))\n # Do not count header\n smfile_linecount += (curr_linecount)\n lineno = 0\n try:\n for line in open(small_csv_fn, 'r', encoding='utf-8'):\n # Skip header in each file\n if lineno == 0:\n pass\n else:\n outf.write('{}'.format(line))\n lineno += 1\n except Exception as inner_err:\n print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))\n except Exception as outer_err:\n print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))\n finally:\n outf.close()\n\n lgfile_linecount = get_line_count(out_csv_filename) - 1\n print('Total {} of {} records written to {}'.format(\n lgfile_linecount, smfile_linecount, out_csv_filename))", "def test_aggregation(self):\n file1 = DarshanIngestedJobFile(name=\"file1\")\n file2a = DarshanIngestedJobFile(name=\"file2\")\n file2b = DarshanIngestedJobFile(name=\"file2\")\n file3 = DarshanIngestedJobFile(name=\"file3\")\n\n file1.bytes_read = 123\n file2a.bytes_read = 456\n file2b.bytes_read = 789\n file3.bytes_read = 12\n\n file1.bytes_written = 345\n file2a.bytes_written = 678\n file2b.bytes_written = 901\n file3.bytes_written = 234\n\n file1.write_count = 3\n file2a.write_count = 4\n file2b.write_count = 5\n file3.write_count = 6\n\n file1.read_count = 7\n file2a.read_count = 8\n file2b.read_count = 9\n file3.read_count = 10\n\n file1.open_count = 1\n file2a.open_count = 2\n file2b.open_count = 3\n file3.open_count = 4\n\n job1 = DarshanIngestedJob(label=\"jobA\", time_start=123, file_details={\n \"file1\": file1,\n \"file2\": file2a\n })\n\n job2 = DarshanIngestedJob(label=\"jobA\", time_start=456, file_details={\n \"file2\": file2b,\n \"file3\": file3\n })\n\n # We should be able to aggregate the jobs!\n job1.aggregate(job2)\n self.assertEqual(job1.time_start, 123)\n 
self.assertEqual(len(job1.file_details), 3)\n self.assertEqual({\"file1\", \"file2\", \"file3\"}, set(job1.file_details.keys()))\n\n f1 = job1.file_details[\"file1\"]\n self.assertEqual(f1.read_count, 7)\n self.assertEqual(f1.write_count, 3)\n self.assertEqual(f1.open_count, 1)\n self.assertEqual(f1.bytes_read, 123)\n self.assertEqual(f1.bytes_written, 345)\n\n f2 = job1.file_details[\"file2\"]\n self.assertEqual(f2.read_count, 17)\n self.assertEqual(f2.write_count, 9)\n self.assertEqual(f2.open_count, 5)\n self.assertEqual(f2.bytes_read, 1245)\n self.assertEqual(f2.bytes_written, 1579)\n\n f3 = job1.file_details[\"file3\"]\n self.assertEqual(f3.read_count, 10)\n self.assertEqual(f3.write_count, 6)\n self.assertEqual(f3.open_count, 4)\n self.assertEqual(f3.bytes_read, 12)\n self.assertEqual(f3.bytes_written, 234)", "def test_java_job_result(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.JavaHelloWorldJob\"\n job = self._create_job(test_app, class_path,\n ctx=self._get_functional_java_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual(\"Hi!\", job.result)", "def _finish_job_complete_minimal(app_id, app_module):\n # set up constants\n job_id = \"6046b539ce9c58ecf8c3e5f3\"\n job_output = {\"version\": \"1.1\", \"id\": job_id, \"result\": [{\"foo\": \"bar\"}]}\n user = \"someuser\"\n gitcommit = \"somecommit\"\n resources = {\"fake\": \"condor\", \"resources\": \"in\", \"here\": \"yo\"}\n sched = \"somescheduler\"\n\n # set up mocks\n sdkmr = create_autospec(SDKMethodRunner, spec_set=True, instance=True)\n logger = create_autospec(Logger, spec_set=True, instance=True)\n mongo = create_autospec(MongoUtil, spec_set=True, instance=True)\n kafka = create_autospec(KafkaClient, spec_set=True, instance=True)\n catalog = create_autospec(Catalog, spec_set=True, instance=True)\n condor = create_autospec(Condor, spec_set=True, instance=True)\n sdkmr.get_mongo_util.return_value = mongo\n sdkmr.get_logger.return_value = logger\n sdkmr.get_kafka_client.return_value = kafka\n sdkmr.get_condor.return_value = condor\n sdkmr.get_catalog.return_value = catalog\n\n # set up return values for mocks. Ordered as per order of operations in code\n job1 = _finish_job_complete_minimal_get_test_job(\n job_id, sched, app_id, gitcommit, user\n )\n job2 = _finish_job_complete_minimal_get_test_job(\n job_id, sched, app_id, gitcommit, user\n )\n job2.status = Status.completed.value\n\n sdkmr.get_job_with_permission.side_effect = [job1, job2]\n mongo.get_job.return_value = job2 # gets the job 3x...?\n condor.get_job_resource_info.return_value = resources\n\n # call the method\n JobsStatus(sdkmr).finish_job(job_id, job_output=job_output) # no return\n\n # check mocks called as expected. 
Ordered as per order of operations in code\n\n sdkmr.get_job_with_permission.assert_has_calls(\n [\n call(\n job_id=job_id, requested_job_perm=JobPermissions.WRITE, as_admin=False\n ),\n call(\n job_id=job_id, requested_job_perm=JobPermissions.WRITE, as_admin=False\n ),\n ]\n )\n logger.debug.assert_has_calls(\n [\n call(\"Finishing job with a success\"),\n # depending on stable dict ordering for this test to pass\n call(f\"Extracted the following condor job ads {resources}\"),\n ]\n )\n mongo.finish_job_with_success.assert_called_once_with(job_id, job_output)\n kafka.send_kafka_message.assert_called_once_with(\n KafkaFinishJob(\n job_id=job_id,\n new_status=Status.completed.value,\n previous_status=Status.running.value,\n scheduler_id=sched,\n error_code=None,\n error_message=None,\n )\n )\n mongo.get_job.assert_called_once_with(job_id)\n les_expected = {\n \"user_id\": user,\n \"func_module_name\": \"module\",\n \"func_name\": \"method_id\",\n \"git_commit_hash\": gitcommit,\n \"creation_time\": ObjectId(\n job_id\n ).generation_time.timestamp(), # from Job ObjectId\n \"exec_start_time\": ObjectId(job_id).generation_time.timestamp() + 5,\n \"finish_time\": ObjectId(job_id).generation_time.timestamp() + 10,\n \"is_error\": 0,\n \"job_id\": job_id,\n }\n if app_id:\n app_id = app_id.split(\"/\")[-1]\n les_expected.update({\"app_id\": app_id, \"app_module_name\": app_module})\n catalog.log_exec_stats.assert_called_once_with(les_expected)\n mongo.update_job_resources.assert_called_once_with(job_id, resources)\n\n # Ensure that catalog stats were not logged for a job that was created but failed before running\n bad_running_timestamps = [-1, 0, None]\n for timestamp in bad_running_timestamps:\n log_exec_stats_call_count = catalog.log_exec_stats.call_count\n update_finished_job_with_usage_call_count = (\n mongo.update_job_resources.call_count\n )\n job_id2 = \"6046b539ce9c58ecf8c3e5f4\"\n subject_job = _finish_job_complete_minimal_get_test_job(\n job_id2,\n sched,\n app_id,\n gitcommit,\n user,\n )\n subject_job.running = timestamp\n subject_job.status = Status.created.value\n sdkmr.get_job_with_permission.side_effect = [subject_job, subject_job]\n JobsStatus(sdkmr).finish_job(subject_job, job_output=job_output) # no return\n assert catalog.log_exec_stats.call_count == log_exec_stats_call_count\n assert (\n mongo.update_job_resources.call_count\n == update_finished_job_with_usage_call_count\n )" ]
[ "0.65560216", "0.6231087", "0.61976314", "0.61811936", "0.6096661", "0.60207766", "0.58854157", "0.5847686", "0.5811745", "0.5810917", "0.57712907", "0.572245", "0.5717534", "0.5705085", "0.5678648", "0.56590325", "0.5635989", "0.5614332", "0.5596668", "0.55745363", "0.5560874", "0.552546", "0.55237395", "0.5520285", "0.55158424", "0.55148494", "0.5511424", "0.5505536", "0.55052555", "0.54951584", "0.5489306", "0.5484164", "0.54764974", "0.54680455", "0.5463729", "0.5443637", "0.5435548", "0.53963333", "0.5387521", "0.53861433", "0.5382057", "0.5378272", "0.5362147", "0.53395027", "0.5339216", "0.531043", "0.5309969", "0.5309415", "0.52976036", "0.5282704", "0.5266739", "0.52622867", "0.52590203", "0.52433664", "0.5243348", "0.5241838", "0.5236627", "0.5228546", "0.5224973", "0.5223485", "0.52075464", "0.52022976", "0.51840454", "0.5170995", "0.5167593", "0.5149046", "0.514598", "0.5145206", "0.5143387", "0.51227355", "0.5118445", "0.5117915", "0.5116509", "0.51126856", "0.50948685", "0.50928783", "0.50846267", "0.50845414", "0.5081974", "0.50799805", "0.50730723", "0.50710875", "0.50709814", "0.5069721", "0.5066433", "0.50617576", "0.50579405", "0.5056059", "0.50543183", "0.50516367", "0.504553", "0.50385725", "0.5035866", "0.5029595", "0.50152546", "0.5008037", "0.50065637", "0.50036615", "0.50031763", "0.500124" ]
0.67897856
0
_jobFailed_ A job has failed. Non-merge jobs are ignored. Since it is a general failure, the error handler has already tried to resubmit the job a number of times and it always failed. Then mark all input files as 'unmerged' (which increments their failure counters) and the output file as 'failed'. If the failure limit for an input file is reached, it is tagged as 'invalid'
def jobFailed(self, jobName): # ignore non merge jobs if jobName.find('mergejob') == -1: logging.info("Ignoring job %s, since it is not a merge job" \ % jobName) # Add cleanup flag for non merge jobs too logging.info("trigger cleanup for: %s" % jobName) try: self.trigger.setFlag("cleanup", jobName, "MergeAccountant") except (ProdAgentException, ProdException): logging.error("trying to continue processing failure event") return # files can be cleaned up now logging.info("trigger cleanup for: %s" % jobName) try: self.trigger.setFlag("cleanup", jobName, "MergeAccountant") except (ProdAgentException, ProdException): logging.error("trying to continue processing failure event") # verify enable condition if not self.enabled: return # open a DB connection database = MergeSensorDB() # start a transaction database.startTransaction() # get job information try: jobInfo = database.getJobInfo(jobName) # cannot get it! except Exception, msg: logging.error("Cannot process Failure event for job %s: %s" \ % (jobName, msg)) database.closeDatabaseConnection() return # check that job exists if jobInfo is None: logging.error("Job %s does not exist." % jobName) database.closeDatabaseConnection() return # check status if jobInfo['status'] != 'undermerge': logging.error("Cannot process Failure event for job %s: %s" \ % (jobName, "the job is not currently running")) database.closeDatabaseConnection() return # get dataset id datasetId = database.getDatasetId(jobInfo['datasetName']) # mark all input files as 'unmerged' (or 'invalid') unFinishedFiles = [] for fileName in jobInfo['inputFiles']: # update status newStatus = database.updateInputFile(\ datasetId, fileName, \ status = "unmerged", \ maxAttempts = int(self.args['MaxInputAccessFailures'])) # add invalid files to list of non finished files if newStatus == 'invalid': unFinishedFiles.append(fileName) # mark output file as 'failed' database.updateOutputFile(datasetId, jobName=jobName, status='failed') # commit changes database.commit() # notify the PM about the unrecoverable files if len(unFinishedFiles) > 0: File.merged(unFinishedFiles, True) # log message logging.info("Job %s failed, file information updated." % jobName) # close connection database.closeDatabaseConnection()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fail(self):\n self.cleanup()\n self.runner.report_job_fail(self.id)", "def handle_job_error(self, job):\n super().handle_job_error(job)\n\n self._handle_job_status(job, \"failed\")", "def runjob(job):\n inputfiles = glob.glob(job['InputFile'])\n logecho(' %s file/s found for %s: ' %\n (len(inputfiles), job['InputFile']))\n\n # process files, order by most recent\n inputfiles.sort(key=os.path.getmtime, reverse=True)\n for inputfile in inputfiles:\n inputfile_error = False\n inputfile_errordetails = ''\n t1_startdt = datetime.now()\n starttime = t1_startdt.strftime('%Y-%m-%d %H:%M:%S')\n t1_start = perf_counter()\n dupecount = 0\n dupesremoved = 0\n resource_id = ''\n\n logecho(' Processing: %s...' % inputfile)\n\n def custom_date_parser(x): return dateparser.parse(\n x, date_formats=dateformats_list)\n\n df = pd.read_csv(inputfile, parse_dates=[\n datecolumn], date_parser=custom_date_parser,\n skipinitialspace=True)\n\n if job['Dedupe']:\n pkey_list = list(job['PrimaryKey'].split(','))\n\n # first, count number of dupe rows for logging\n dupecount = df.duplicated(subset=pkey_list, keep='first').sum()\n\n dedupe_flag = job['Dedupe']\n if dedupe_flag == 'first' or dedupe_flag == 'last':\n df.drop_duplicates(\n subset=pkey_list, keep=dedupe_flag, inplace=True)\n dupesremoved = dupecount\n\n colname_list = df.columns.tolist()\n\n coltype_list = []\n for column in df:\n coltype_list.append(get_col_dtype(df[column]))\n\n fields_dictlist = []\n for i in range(0, len(colname_list)):\n fields_dictlist.append({\n \"id\": colname_list[i],\n \"type\": coltype_list[i][0]\n })\n if coltype_list[i][0] == 'timestamp':\n df[colname_list[i]] = df[colname_list[i]].astype(str)\n\n logecho('FIELDS_DICTLIST: %s' % fields_dictlist, level='debug')\n\n data_dict = df.to_dict(orient='records')\n\n # check if resource exists\n # this works only when TargetResource is an existing\n # resource id hash\n try:\n resource = portal.action.resource_show(\n id=job['TargetResource'])\n except:\n logecho(' Resource \"%s\" is not a resource id.' %\n job['TargetResource'])\n resource = ''\n else:\n existing_resource_desc = resource['description']\n\n if not resource:\n # resource doesn't exist. Check if package exists\n try:\n package = portal.action.package_show(\n id=job['TargetPackage'])\n except:\n package = ''\n\n if not package:\n # package doesn't exist. Create it\n # first, check if TargetOrg exist\n logecho(' Creating package \"%s\"...' %\n job['TargetPackage'])\n\n if not (job['TargetOrg'] in org_list):\n errmsg = 'TargetOrg \"%s\" does not exist!' % job['TargetOrg']\n logecho(errmsg, level='error')\n sys.exit(errmsg)\n\n try:\n package = portal.action.package_create(\n name=job['TargetPackage'],\n private=False,\n owner_org=job['TargetOrg']\n )\n except Exception as e:\n logecho(' Cannot create package \"%s\"!' %\n job['TargetPackage'], level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n package = ''\n else:\n logecho(' Created package \"%s\"...' %\n job['TargetPackage'])\n else:\n logecho(' Package \"%s\" found...' 
% job['TargetPackage'])\n\n logecho('PACKAGE: %s\\n\\nFIELDS: %s' %\n (package, fields_dictlist), level='debug')\n # logecho('RECORDS: %s\\n' % data_dict, level='debug')\n\n # now check if resource name already exists in package\n resource_exists = False\n existing_resource_desc = ''\n resources = package.get('resources')\n for resource in resources:\n if resource['name'] == job['TargetResource']:\n resource_exists = True\n existing_resource_desc = resource['description']\n resource_id = resource['id']\n break\n\n #resource_id = ''\n if package and resource_exists:\n\n if job['Truncate']:\n try:\n result = portal.action.datastore_delete(\n resource_id=resource['id'],\n force=True\n )\n except Exception as e:\n logecho(' Truncate failed',\n level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n\n logecho(' \"%s\" (%s) exists in package \"%s\". Doing datastore_upsert...' % (\n job['TargetResource'], resource['id'], job['TargetPackage']))\n try:\n result = portal.action.datastore_upsert(\n force=True,\n resource_id=resource['id'],\n records=data_dict,\n method='upsert',\n calculate_record_count=True\n )\n except Exception as e:\n logecho(' Upsert failed', level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n else:\n logecho(' Upsert successful! %s rows...' %\n len(data_dict))\n #resource_id = result['resource_id']\n #resource_id = resource['id']\n else:\n logecho(' \"%s\" does not exist in package \"%s\". Doing datastore_create...' % (\n job['TargetResource'], job['TargetPackage']))\n\n alias = '%s-%s-%s' % (job['TargetOrg'],\n job['TargetPackage'], job['TargetResource'])\n resource = {\n \"package_id\": package['id'],\n \"format\": \"csv\",\n \"name\": job['TargetResource']\n }\n try:\n resource = portal.action.datastore_create(\n force=True,\n resource=resource,\n aliases='',\n fields=fields_dictlist,\n records=data_dict,\n primary_key=job['PrimaryKey'],\n indexes=job['PrimaryKey'],\n calculate_record_count=False\n )\n except Exception as e:\n logecho(' Cannot create resource \"%s\"!' %\n job['TargetResource'], level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n else:\n logecho(' Created resource \"%s\"...' 
%\n job['TargetResource'])\n resource_id = resource['resource_id']\n resource = portal.action.datastore_create(\n force=True,\n resource_id=resource_id,\n aliases=alias,\n calculate_record_count=True\n )\n\n logecho('EXISTING DESC for resource %s: %s' %\n (resource_id, existing_resource_desc), level='debug')\n updated_desc = ''\n if existing_resource_desc:\n result = re.split(r' \\(UPDATED: (.*?)\\)$',\n existing_resource_desc)\n if len(result) == 3:\n # there is an old update date\n updated_desc = result[0]\n else:\n updated_desc = existing_resource_desc\n updated_desc = updated_desc + ' (UPDATED: %s)' % starttime\n logecho('RESOURCE UPDATED DESC: %s: %s' %\n (resource_id, updated_desc), level='debug')\n portal.action.resource_update(\n id=resource_id,\n description=updated_desc)\n\n logecho('RESOURCE: %s' % resource, level='debug')\n\n if job['Stats'] and resource_id:\n logecho(' Computing stats...')\n result = computestats(\n job['Stats'], job['PrimaryKey'], package, resource_id, job['TargetResource'],\n starttime)\n\n t1_stop = perf_counter()\n t1_stopdt = datetime.now()\n endtime = t1_stopdt.strftime('%Y-%m-%d %H:%M:%S')\n elapsed = t1_stop - t1_start\n\n if inputfile_error:\n # inputfile processing failed, move to problemsdir\n try:\n shutil.move(inputfile, problemsdir + '/' +\n os.path.basename(inputfile))\n except Exception as e:\n errmsg = 'Cannot move %s to %s: %s' % (\n inputfile, problemsdir, str(e))\n logecho(errmsg, level='error')\n problems_logger.error(errmsg)\n\n error_details = '- FILE: %s START: %s END: %s ELAPSED: %s DUPES: %s/%s ERRMSG: %s' % (\n inputfile, starttime, endtime, elapsed, dupecount, dupesremoved, inputfile_errordetails)\n problems_logger.info(error_details)\n else:\n # inputfile was successfully processed, move to processeddir\n try:\n shutil.move(inputfile, processeddir + '/' +\n os.path.basename(inputfile))\n except Exception as e:\n errmsg = 'Cannot move %s to %s: %s' % (\n inputfile, processeddir, str(e))\n logecho(errmsg, level='error')\n processed_logger.error(errmsg)\n\n processed = len(df.index) if 'df' in locals() else 0\n processed_details = '- FILE: %s START: %s END: %s ELAPSED: %s DUPES: %s/%s PROCESSED: %s' % (\n inputfile, starttime, endtime, elapsed, dupecount, dupesremoved, processed)\n processed_logger.info(processed_details)\n\n logecho(' Processed %s file/s...' 
% len(inputfiles))", "def test_failed_job(self):\n\n failed_job = json.loads(TREEHERDER_JOB % (\"testfailed\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(failed_job), FAILURE)", "def on_job_error(\n self,\n scheduler: plugin_jobs.Scheduler,\n job: tools_jobs.Job,\n exc: BaseException,\n ):\n self.error(exception=exc)", "def _validate_jobs(\n self,\n check_nlst_warn: bool = False\n ):\n counter = 0\n for job in self.jobs:\n counter += 1\n print(job.job_id)\n if counter == 0:\n ignore_restarts = False\n else:\n ignore_restarts = True\n\n check_input_files(\n hrldas_namelist=job.hrldas_namelist,\n hydro_namelist=job.hydro_namelist,\n sim_dir=os.getcwd(),\n ignore_restarts=ignore_restarts,\n check_nlst_warn=check_nlst_warn\n )", "def do_task(inputf, input_ext, failed_outputf, outputf, output_ext, tool, jobs, validate):\n print('Checking failed tasks')\n failed = []\n files = get_file_list(inputf, input_ext)\n tmp_dir = create_tmp_dir(tool)\n for f in files:\n match = find_matching_file(inputf + f, inputf, failed_outputf)\n if match is None or is_failed(match, validate, tmp_dir):\n failed.append(inputf + f)\n print('Found {:d} failed tasks.'.format(len(failed)))\n if len(failed) == 0:\n return\n failed = sorted(failed)\n for f in failed:\n print(f)\n print('Creating tasks ...')\n # assumes all input has same extension\n archive_input = (get_extracted_name(os.path.basename(failed[0])) != os.path.basename(failed[0]))\n tmp_dir_input = None\n if archive_input:\n tmp_dir_input = create_tmp_dir(tool)\n tmp_dir_output = None\n if output_ext is not None:\n tmp_dir_output = create_tmp_dir(tool)\n tasks = []\n for f in failed:\n basename = os.path.basename(f)\n output_file = outputf + get_extracted_name(f[len(inputf):]) + output_ext\n tasks.append((tool, f, output_file, archive_input,\n output_ext, tmp_dir_input, tmp_dir_output))\n print('{:d} tasks created.'.format(len(tasks)))\n\n print('Checking for write permission ...')\n permitted_task = []\n not_permitted = []\n for t in tasks:\n if not os.access(t[2], os.W_OK):\n # try removing first\n try:\n os.remove(t[2])\n except IOError:\n pass\n if os.path.exists(t[2]):\n if not os.access(t[2], os.W_OK):\n not_permitted.append(t)\n else:\n permitted_task.append(t)\n\n tasks = permitted_task\n print('{:d} tasks not permitted.'.format(len(not_permitted)))\n print('{:d} tasks left.'.format(len(tasks)))\n print('Running tasks in {:d} jobs ...'.format(jobs))\n if jobs == 1:\n for t in tasks:\n run(t)\n else:\n with Pool(processes=jobs) as p:\n p.map(run, tasks, chunksize=1)\n shutil.rmtree(tmp_dir)\n if tmp_dir_input is not None:\n shutil.rmtree(tmp_dir_input)\n if tmp_dir_output is not None:\n shutil.rmtree(tmp_dir_output)\n print('Tasks finished.')", "def finish(self):\n for msg, info in self.errors.iteritems():\n hosts = [ self.job_to_str_func(job) for job in info['jobs'] ]\n\n max_jobs_num = self.max_jobs_num\n if max_jobs_num < 0 or max_jobs_num > len(hosts):\n hosts_msg = ': %s' % ' '.join(hosts)\n elif max_jobs_num == 0:\n hosts_msg = ''\n else:\n hosts_msg = ': %s (and %s more)' % (' '.join(sorted(hosts)[:self.max_jobs_num]), \\\n len(hosts) - self.max_jobs_num)\n\n ex = info['exception']\n msg = '%s.%s: %s' % (ex.__class__.__module__, \\\n ex.__class__.__name__, \\\n str(ex).split('\\n')[0])\n print >> self.outfile, \"Exception '%s' in %s jobs%s.\" % (msg, len(hosts), hosts_msg)\n print >> self.outfile, exception_description(ex).strip()\n if info['trace'] != None:\n print >> self.outfile, 'Traceback:'\n print >> self.outfile, 
''.join(info['trace'])\n\n print >> self.outfile", "def jobFail(job):\n\tif 'a' in job.proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying job fails')\n\t\tEMAIL.send('job', job, 'abort')", "def errors(job_name, jenkins_username, jenkins_token):\n global server\n# job_name = 'enterprise_pe-acceptance-tests_integration-system_pe_full-upgrade_weekend_2016.4.x' # 'enterprise_pe-orchestrator_intn-van-sys-pez-multi_2016.4.x-2016.4.x' # 'enterprise_pe-modules-vanagon-suite_intn-van-sys-pez-multi_daily-pe-modules-2016.4.x'\n server = Jenkins('https://cinext-jenkinsmaster-enterprise-prod-1.delivery.puppetlabs.net', username=jenkins_username, password=jenkins_token)\n info = server.get_job_info(job_name)\n builds = [server.get_build_info(job_name, build['number']) for build in info['builds']]\n failed_build_numbers = [b for b in builds if b['result'] == 'FAILURE']\n last_job_errors = None\n\n counts = defaultdict(int)\n similar = set()\n for build in failed_build_numbers:\n output = server.get_build_console_output(job_name, build['number'])\n finder = get_strategy(output)\n errors = finder(output)\n print \"Errors: {}\".format(errors)\n if last_job_errors:\n seq = difflib.SequenceMatcher(a=last_job_errors, b=errors)\n if seq.ratio() == 1.0:\n counts['exact'] += 1\n if seq.ratio() >= 0.7 and seq.ratio() < 1.0:\n counts['similar'] += 1\n similar.append(errors)\n else:\n last_job_errors = errors\n\n if last_job_errors:\n click.echo('Last job errors were:')\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n\n if last_job_errors and 'exact' in counts:\n click.echo('There were {} jobs that failed with errors exactly the same as the last failed job:'.format(counts['exact']))\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n\n if last_job_errors and 'similar' in counts:\n click.echo('There were {} jobs that failed with experienced similar errors as the last failed job:'.format(counts['exact']))\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n for s in similar:\n click.echo('Additional Failed Job:')\n click.echo('\\t{}'.format('\\n\\t'.join(s)))", "def test_failed_job(self):\n failed_job = json.loads(BASE_JSON % (FAILURE, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(failed_job), FAILURE)", "def retry_failed_downloader_jobs() -> None:\n failed_jobs = (\n DownloaderJob.failed_objects.filter(created_at__gt=utils.JOB_CREATED_AT_CUTOFF)\n .order_by(\"created_at\")\n .prefetch_related(\"original_files__samples\")\n )\n\n paginator = Paginator(failed_jobs, utils.PAGE_SIZE, \"created_at\")\n page = paginator.page()\n page_count = 0\n\n if len(page.object_list) <= 0:\n # No failed jobs, nothing to do!\n return\n\n queue_capacity = get_capacity_for_downloader_jobs()\n\n if queue_capacity <= 0:\n logger.info(\n \"Not handling failed (explicitly-marked-as-failure) downloader jobs \"\n \"because there is no capacity for them.\"\n )\n\n while queue_capacity > 0:\n logger.info(\n \"Handling page %d of failed (explicitly-marked-as-failure) downloader jobs!\", page_count\n )\n\n handle_downloader_jobs(page.object_list)\n\n if page.has_next():\n page = paginator.page(page.next_page_number())\n page_count = page_count + 1\n queue_capacity = get_capacity_for_downloader_jobs()\n else:\n break", "def test_job_fail(self):\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_fail\"\n name = \"TestFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = 
JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)", "async def test_job_gen_failed(my_job_gen):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n\n my_job_gen.set_on_update(on_job_update)\n\n # Submit a job which must fail.\n await my_job_gen.job(yieldsteps=1, mustfail=True)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job_gen.process_jobs()\n\n # Check job state when job is done.\n assert job.state == 'ERROR', f'Failed job has wrong state `{job.state}`!'", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. \"\n \"More details in the 'Exception Information' section.\")", "def _check_job_completeness(self, jobs):\n for job in concurrent.futures.as_completed(jobs):\n if job.exception():\n raise (job.exception())", "def _tidyAfterRun (self):\n\t\tfailedjobs = []\n\t\tfor i in self.ncjobids:\n\t\t\tjob = self.jobs[i]\n\t\t\tif not job.succeed():\n\t\t\t\tfailedjobs.append (job)\n\t\t\n\t\tif not failedjobs:\t\n\t\t\tself.log ('Successful jobs: ALL', 'debug')\n\t\t\tif callable (self.callback):\t\t\n\t\t\t\tself.log('Calling callback ...', 'debug')\n\t\t\t\tself.callback (self)\n\t\telse:\n\t\t\tfailedjobs[0].showError (len(failedjobs))\n\t\t\tif self.errorhow != 'ignore': \n\t\t\t\tsys.exit (1) # don't go further", "def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4", "async def test_job_failed(my_job):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n\n my_job.set_on_update(on_job_update)\n\n # Submit a job which must fail.\n await my_job.job(mustfail=True)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'ERROR', f'Failed job has wrong state `{job.state}`!'", "def test_run_job_fail(self):\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_fail/TestFileUploadFail\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n 
self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n # Also ensure the standard log message about aborting the transaction is present\n self.assertEqual(job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\")\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)", "def put_job_failure(job, message):\n print('Putting job failure')\n print(message)\n code_pipeline.put_job_failure_result(jobId=job, failureDetails={'message': message, 'type': 'JobFailed'})", "def fail_job( self, job_state ):\n self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )\n job_state.job_wrapper.fail( getattr( job_state, \"fail_message\", GENERIC_REMOTE_ERROR ) )", "def failed_jobs(username, root_wf_id, wf_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n args = __get_datatables_args()\n\n total_count, filtered_count, failed_jobs_list = dashboard.get_failed_jobs(wf_id, **args)\n\n for job in failed_jobs_list:\n job.exec_job_id = '<a href=\"' + url_for('.job', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job.job_id, job_instance_id=job.job_instance_id) + '\">' + job.exec_job_id + '</a>'\n job.stdout = '<a target=\"_blank\" href=\"' + url_for('.stdout', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job.job_id, job_instance_id=job.job_instance_id) + '\">Application Stdout/Stderr</a>'\n job.stderr = '<a target=\"_blank\" href=\"' + url_for('.stderr', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job.job_id, job_instance_id=job.job_instance_id) + '\">Condor Stderr/Pegasus Lite Log</a>'\n\n return render_template('workflow/jobs_failed.xhr.json', count=total_count, filtered=filtered_count, jobs=failed_jobs_list, table_args=args)", "def validate(self):\n for file_combination in self.get_result_combinations():\n # get to Job info for the given file\n # this is currently a work around\n job_info_a = self.find_job_info(file_combination[0])\n job_info_b = self.find_job_info(file_combination[1])\n\n provider_a = job_info_a['backend']\n provider_b = job_info_b['backend']\n result = {}\n\n # If both jobs returned results we can validate them against each other, we can still run the validation\n # if the files were previously downloaded\n if (job_info_a['download_successful'] and job_info_b['download_successful']) and \\\n (os.path.exists(file_combination[0]) and os.path.exists(file_combination[1])):\n for current_rule in self.ruleEngine.get_rules():\n current_rule.get_name_of_rule\n current_rule.set_results(file_combination)\n current_rule.set_directory(self.directory)\n\n result_of_rule = current_rule.apply()\n if result_of_rule is not None:\n result[current_rule.get_name_of_rule()] = result_of_rule\n else:\n result = {\n 'provider_a_download_successful': job_info_a['download_successful'],\n 'provider_b_download_successful': job_info_b['download_successful'],\n 'description': 'Validation could not be performed as atleast one provider did not return data, '\n 'if there are validation results it is because local results were used'\n }\n\n performance = {\n 'provider_a': job_info_a['time_to_result'],\n 'provider_b': job_info_b['time_to_result'],\n 'unit': 'seconds'\n }\n\n # Create the key for the provider\n if self._report.get(provider_a) is None:\n self._report[provider_a] = {'results': []}\n if self._report.get(provider_b) is None:\n self._report[provider_b] = {'results': []}\n\n 
self._report[provider_a]['results'].append({\n 'meta-information': {\n 'file_a': file_combination[0],\n 'file_b': file_combination[1],\n 'provider_a': provider_a,\n 'provider_b': provider_b,\n 'job-identifier': job_info_a['job'],\n 'provider-job-id_a': job_info_a['provider_job_id'],\n 'provider-job-id_b': job_info_b['provider_job_id'],\n 'performance': performance\n },\n 'rule_results': result\n })\n self._report[provider_b]['results'].append({\n 'meta-information': {\n 'file_a': file_combination[0],\n 'file_b': file_combination[1],\n 'compared_to_provider': provider_a,\n 'job-identifier': job_info_a['job'],\n 'provider-job-id_a': job_info_a['provider_job_id'],\n 'provider-job-id_b': job_info_b['provider_job_id'],\n 'performance': performance\n },\n 'rule_results': result\n })\n\n print('Images and job results analyzed!')\n print('Saving to report to disk')\n self.save_report()", "def test_gaussian_log_file_error(self):\n job = self.job_list[-2]\n self.assertTrue(isinstance(job, StatMechJob))\n with self.assertRaises(InputError):\n job.load()", "def test_update_with_fail(executable):\n from tempfile import mkdtemp\n from shutil import rmtree\n from pylada.jobfolder.jobfolder import JobFolder\n from pylada.process.jobfolder import JobFolderProcess\n from pylada.process import Fail\n from pylada import default_comm\n from functional import Functional\n\n root = JobFolder()\n for n in xrange(3):\n job = root / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n root['1'].functional.order = 666\n root['1'].sleep = None\n supp = JobFolder()\n for n in xrange(3, 6):\n job = supp / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n supp['5'].sleep = 0\n supp['5'].functional.order = 666\n\n comm = default_comm.copy()\n comm['n'] = 4\n\n dir = mkdtemp()\n try: \n program = JobFolderProcess(root, nbpools=2, outdir=dir, keepalive=True)\n\n # compute current jobs.\n program.start(comm)\n try: program.wait()\n except Fail: pass\n else: raise Exception()\n assert len(program.errors) == 1\n\n # compute second set of updated jobs\n program.update(supp)\n try: program.wait()\n except Fail: pass\n else: raise Exception()\n assert len(program.errors) == 2\n program.errors.clear()\n\n\n finally:\n try: rmtree(dir)\n except: pass", "def submit_job_bad_shots(backend: IBMQBackend) -> IBMQJob:\n qobj = bell_in_qobj(backend=backend)\n # Modify the number of shots to be an invalid amount.\n qobj.config.shots = backend.configuration().max_shots + 10000\n job_to_fail = backend.run(qobj)\n return job_to_fail", "def cleanup(options, cmdargs, errStream=sys.stdin):\n\n logging.debug(\"Cleanup: retries=%d\" % options.retries)\n exitcode=0\n\n # get list of output flags\n outFileByFlag = getOutputFiles(options, cmdargs)\n logging.debug(\"Outfiles: %s\" % (outFileByFlag))\n\n # name outputfiles\n fileNameBase = getFileNameBase(options.outputFlags, \n outFileByFlag, \n options.jobName)\n\n # remove old output files\n errStreamFile=\"%s.stderr\" % fileNameBase\n failureStreamFile=\"%s.failures\" % fileNameBase\n for file in errStreamFile, failureStreamFile:\n if os.path.exists(file):\n logging.debug('Removing previous file: %s' % file)\n os.remove(file)\n for file in outFileByFlag.values():\n if file is not None:\n if os.path.exists(file):\n logging.debug('Removing previous file: %s' % file)\n os.remove(file)\n\n # set up copy method (some task types might do some filtering)\n copyFilesToStream=taskSpecificCopy.get(options.taskType,addFilesToStream)\n\n # loop until 
everything is node or we give up\n taskIds=list(range(1,options.splits+1))\n errStream = None\n failureStream = None\n while True:\n logging.debug(\"starting to scan for fraqgments: %r (retries: %d)\" % (taskIds,options.retries))\n # if no output file specified, add STDOUT\n if len(outFileByFlag)==0:\n outFileByFlag['%stdout']=None\n # Change filenames to (filename,None) tuples that can be populated with streams\n for flag in outFileByFlag.keys():\n if flag == '%stdout':\n # special case for STDOUT\n outFileByFlag[flag]=sys.stdout\n elif isinstance(outFileByFlag[flag],list):\n # reset tuples leftover from previous loop\n outFileByFlag[flag][1]=None\n else:\n # Change filenames to (filename,None) tuples that can be populated with streams\n outFileByFlag[flag]=[outFileByFlag[flag],None]\n\n # keep track of things to resubmit\n failedTasks=[]\n anySuccess=False\n missingRecords={}\n # look for files\n for i in taskIds:\n # look for output\n fragName = getFragmentName(options.fragBase, i, options.fragSuff)\n prefix = getFragmentPrefix(options.fragBase,i)\n frag = \"%s%s%s\" % (options.tmpDir, os.sep, fragName)\n fragerr = \"%s.exitcode\" % (frag)\n outfrag = \"%s.stdout\" % (frag)\n errfrag = \"%s.stderr\" % (frag)\n logfrag = \"%s.log\" % (frag)\n outfragmap={}\n\n # For each configured output file, map fragment to final\n for (flag, flagOutFile) in outFileByFlag.items():\n if flag=='%stdout':\n outfragmap[outfrag]=flagOutFile\n else:\n (tmpDir,otheroutfrag) = getOutputFromFlag(flag,fragName,prefix,options.tmpDir,options.tmpDir)\n outfragmap[\"%s%s%s\" % (tmpDir,os.sep,otheroutfrag)]=flagOutFile\n\n if not(os.path.exists(fragerr)):\n\n # copy results\n try :\n anyFile=copyFilesToStream(outfragmap,i,frag)\n\n # save log,stdout, and stderr if loglevel is high\n if os.path.exists(logfrag):\n anyFile=True\n if options.verbose>=2:\n if errStream is None:\n create_parent_dir(errStreamFile)\n errStream = open(errStreamFile, 'w')\n addFileToStream(logfrag,errStream,header=\"## LOGGING from fragment %d:\" % (i))\n if outfrag not in outfragmap:\n addFileToStream(outfrag,errStream,header=\"## STDOUT from fragment %d:\" % (i))\n addFileToStream(errfrag,errStream,header=\"## STDERR from fragment %d:\" % (i))\n\n # delete files (input, error, outputs)\n for f in [frag, outfrag, errfrag, logfrag] \\\n + list(outfragmap.keys()):\n if os.path.exists(f):\n anyFile=True\n os.remove(f)\n\n if anyFile:\n anySuccess=True\n continue\n\n except FailedFragmentException as ffe:\n if len(ffe.records) < options.chunk:\n anySuccess=True\n logging.info(\"Task %d has missing records\" % i)\n missingRecords[i]=ffe\n\n else:\n # there was an error\n logging.info(\"Task %d failed\" % i)\n failedTasks.append(i)\n\n ## If we got here, there was an error!\n\n # make sure error streams are open\n if errStream is None:\n errStream = open(errStreamFile, 'w')\n if failureStream is None:\n failureStream = open(failureStreamFile, 'w')\n\n # append to error streams\n if os.path.exists(logfrag):\n addFileToStream(logfrag,errStream,header=\"## LOGGING from fragment %d:\" % (i))\n else:\n errStream.write(\"## LOGGING not found for fragment %d!\\n\" % (i))\n if os.path.exists(errfrag):\n addFileToStream(errfrag,errStream,header=\"## STDERR from fragment %d:\" % (i))\n else:\n errStream.write(\"## STDERR not found for fragment %d!\\n\" % (i))\n if outfrag not in outfragmap:\n if os.path.exists(outfrag):\n addFileToStream(outfrag,errStream,header=\"## STDOUT from fragment %d:\" % (i))\n else:\n errStream.write(\"## STDOUT not found for 
fragment %d!\\n\" % (i))\n\n # save failed records to file\n for failfrag in outfragmap:\n if os.path.exists(failfrag):\n if os.path.isdir(failfrag):\n failureStream.write(\"## FAILURES: fragment %d failed.\" % (i))\n # TODO: do something with the failed output\n else:\n addFileToStream(failfrag,failureStream,header=\"## FAILURES: %s from fragment %d:\" % (failfrag,i))\n os.remove(failfrag)\n else:\n failureStream.write(\"## FAILURES: %s not found for fragment %d!\\n\" % (failfrag,i))\n\n # delete files (exitcode, error, outputs) (save input for re-queueing)\n for f in [fragerr, outfrag, errfrag,]:\n if os.path.exists(f):\n os.remove(f)\n\n # Finished scanning fragments\n logging.info(\"Cleanup is done scanning output files: rtrs: %d, aS: %s, fT: %d, mR: %d\" % (options.retries, anySuccess, len(failedTasks), len(missingRecords)))\n\n # close output streams\n for outstream in outfragmap.values():\n if outstream is sys.stdout:\n continue\n if isinstance(outstream,list):\n if outstream[1] is not None:\n outstream[1].close()\n\n # If conditions are right, resubmit any failures:\n if anySuccess and options.retries!=0:\n options.retries-=1\n logging.info(\"Cleanup is checking for anything that needs to be restarted\")\n\n # get the next available task number (i will still be set from loop)\n nextTaskNum=0\n\n # build new input fragment from afiled and missed fragments in\n # subdirectory of tmpDir\n\n # first check tasks that failed completely\n # rename tasks to make them consecutive\n if len(failedTasks)>0:\n nextTaskNum+=reFragmentMissedTasks(failedTasks, options)\n\n # then, if we were able to identify missing records\n if len(missingRecords)>0:\n # build new fragments out of the missed records\n nextTaskNum=buildMissedRecordFragments(missingRecords, options.tmpDir, options.fragBase, nextTaskNum, options.chunk)\n\n # rerun any missed records and failed fragments\n if nextTaskNum>0:\n # finish setting up tmp dir\n options.splits=nextTaskNum-1\n moveNewFragmentsToTmpDir(options,nextTaskNum)\n\n # re-process tmp dir\n options.wait=True\n logging.info(\"Cleanup will restart tasks: %s\" % (options.splits))\n launchJobs(options, cmdargs, errStream=errStream)\n\n # set up list of fragments to check on next cleanup pass\n taskIds = list(range(1,nextTaskNum))\n\n else:\n # everything is complete\n logging.debug(\"All tasks were successful\")\n\n # TODO:\n # Remove failures file if it exists\n\n break\n else:\n # either everything failed or we give up: exit loop\n logging.debug(\"Cleanup will not re-start any tasks.\")\n exitcode=1\n break\n\n\n logging.info(\"Final cleanup\")\n # check contesnts of tasks.err and tasks.out in options.tmpDir\n logging.debug(\"collecting stderr and stdout from fragments\")\n commonerr=\"%s%stasks.err\"%(options.tmpDir,os.sep)\n commonout=\"%s%stasks.out\"%(options.tmpDir,os.sep)\n # if not empty, add to errStream (make sure it's open)\n if os.path.exists(commonerr):\n if os.path.getsize(commonerr)>0:\n if errStream is None:\n errStream = open(errStreamFile, 'w')\n addFileToStream(commonerr,errStream,header=\"## Uncaptured error output from all tasks:\")\n os.remove(commonerr)\n if os.path.exists(commonout):\n if os.path.getsize(commonout)>0:\n if errStream is None:\n errStream = open(errStreamFile, 'w')\n addFileToStream(commonout,errStream,header=\"## Uncaptured standard output from all tasks:\")\n os.remove(commonout)\n\n # warn if any files left\n logging.debug(\"Checking for leftover files\")\n leftoverFiles=os.listdir(options.tmpDir)\n if len(leftoverFiles)>0:\n if 
errStream is None:\n errStream = open(errStreamFile, 'w')\n errStream.write(\"Files left in %s: %r\" % (options.tmpDir, leftoverFiles))\n for f in leftoverFiles:\n leftoverFilePath=os.sep.join([options.tmpDir,f])\n if os.path.isdir(leftoverFilePath):\n errStream.write(\"Cannot delete directory: %s\" % (f))\n else:\n os.remove(leftoverFilePath)\n\n if errStream is not None:\n errStream.close()\n if failureStream is not None:\n failureStream.close()\n\n # delete directory\n if logging.getLogger().level > logging.DEBUG:\n shutil.rmtree(options.tmpDir)\n else:\n logging.debug(\"NOT removing tmp dir: %s\", options.tmpDir)\n logging.debug(\"cleanup is complete\")\n return exitcode", "def failed(self):\n output = self.__call__()\n return output.failed", "def failed(self) -> None:\n self.failure_count += 1", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass", "def setupJobAfterFailure(self, config):\n self.remainingRetryCount = max(0, self.remainingRetryCount - 1)\n logger.warn(\"Due to failure we are reducing the remaining retry count of job %s with ID %s to %s\",\n self, self.jobStoreID, self.remainingRetryCount)\n # Set the default memory to be at least as large as the default, in\n # case this was a malloc failure (we do this because of the combined\n # batch system)\n if self.memory < config.defaultMemory:\n self._memory = config.defaultMemory\n logger.warn(\"We have increased the default memory of the failed job %s to %s bytes\",\n self, self.memory)", "def getFailedJobs(self):\n return self.__failedJobs", "def abort_unnecessary_jobs(self):\n self._update_candidate_range()\n for r in self.revisions:\n if r == self.lkgr:\n break\n if not r.tested or r.failed:\n r.good = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover\n for r in self.revisions[self.fkbr.list_index + 1:]:\n if not r.tested or r.failed:\n r.bad = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover", "def generate_failure_job(outcome):\n fail_result = Job(\"FAIL\")\n fail_result.processed_by = None\n fail_result.result = {\"outcome\": outcome}\n return fail_result", "async def test_job_async_gen_failed(my_job_async_gen):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n\n my_job_async_gen.set_on_update(on_job_update)\n\n # Submit a job which must fail.\n await my_job_async_gen.job(yieldsteps=1, mustfail=True)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job_async_gen.process_jobs()\n\n # Check job state when job is done.\n assert job.state == 'ERROR', f'Failed job has wrong state `{job.state}`!'", "def rerun_cmd(cmd, outfile, max_ntry=2, subdir_failures='failed_outputs', **run_kwargs):\n outfile = Path(outfile)\n ret = -1\n ntry = 1\n while ret != 0 and ntry <= max_ntry:\n ret = sp.run(cmd, **run_kwargs).returncode\n if ret != 0:\n failed_jobs_subdir = outfile.parent.joinpath(subdir_failures)\n if outfile.exists():\n failed_jobs_subdir.mkdir(exist_ok=True)\n outfile_target = failed_jobs_subdir.joinpath(outfile.name)\n print(f\"Move failed output file from {outfile} to {outfile_target}. 
try #{ntry}\")\n shutil.move(outfile, outfile_target)\n\n ntry += 1\n return ntry - 1", "def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})", "def setup_submission_with_error(cls, sess, row_error_submission_id):\n job_values = {\n 'awardFin': [3, 4, 2, \"awardFin.csv\", 100, 100],\n 'appropriations': [1, 4, 2, \"approp.csv\", 2345, 567],\n 'program_activity': [2, 4, 2, \"programActivity.csv\", None, None],\n 'cross_file': [None, 4, 4, 2, None, None, None]\n }\n\n for job_key, values in job_values.items():\n job = FileTests.insert_job(\n sess,\n filetype=values[0],\n status=values[1],\n type_id=values[2],\n submission=row_error_submission_id,\n filename=values[3],\n file_size=values[4],\n num_rows=values[5]\n )\n # Add errors to cross file job\n metadata = ErrorMetadata(\n job_id=job.job_id,\n occurrences=2,\n severity_id=RULE_SEVERITY_DICT['fatal']\n )\n sess.add(metadata)\n sess.commit()", "def office_clean_failed(parser, args, params):\n parser.parse_known_args(args)\n control.clean_failed(params)", "def test_bulk_group_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actg_missing_col)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n res = self.client.post(self.ag_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def failing_jobs(username, root_wf_id, wf_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n args = __get_datatables_args()\n\n total_count, filtered_count, failing_jobs_list = dashboard.get_failing_jobs(wf_id, **args)\n\n for job in failing_jobs_list:\n job.exec_job_id = '<a href=\"' + url_for('.job', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job.job_id, job_instance_id=job.job_instance_id) + '\">' + job.exec_job_id + '</a>'\n job.stdout = '<a target=\"_blank\" href=\"' + url_for('.stdout', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job.job_id, job_instance_id=job.job_instance_id) + '\">Application Stdout/Stderr</a>'\n job.stderr = '<a target=\"_blank\" href=\"' + url_for('.stderr', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job.job_id, job_instance_id=job.job_instance_id) + '\">Condor Stderr/Pegasus Lite Log</a>'\n\n return render_template('workflow/jobs_failing.xhr.json', count=total_count, filtered=filtered_count, jobs=failing_jobs_list, table_args=args)", "def processingFailed(self, reason):\n if self._finishedDeferreds is not None:\n observers = self._finishedDeferreds\n self._finishedDeferreds = None\n for obs in observers:\n obs.errback(reason)", "def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled 
tasks completed and removed.\n assert len(state.schedule) == 0", "def request_failed(self, ignored):\n self._errors += 1", "def test_bad_error(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['errors'] = [\n {\n 'code': '1',\n 'name': 'error-name-one',\n 'title': 'Error Name',\n 'description': 'Error Description',\n 'category': 'data'\n }\n ]\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertFalse(results['is_valid'])\n self.assertEqual(len(results['errors']), 1)\n self.assertEqual(results['errors'][0]['name'], 'JSON_VALIDATION_ERROR')", "def test_job_complete(self):\r\n t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')\r\n self.assertFalse(job_complete(t))\r\n self.assertFalse(job_complete(t.Children[0]))\r\n self.assertFalse(job_complete(t.Children[1].Children[1]))\r\n\r\n self.assertRaises(JobError, job_complete, t.Children[0].Children[0])\r\n\r\n f = 'test_parallel_merge_otus_JOB_COMPLETE_TEST.poll'\r\n self.assertFalse(os.path.exists(f))\r\n\r\n testf = open(f, 'w')\r\n testf.write('0\\n')\r\n testf.close()\r\n t.PollPath = f\r\n t.StartTime = 10\r\n\r\n self.assertTrue(job_complete(t))\r\n self.assertNotEqual(t.EndTime, None)\r\n self.assertNotEqual(t.TotalTime, None)\r\n\r\n testf = open(f, 'w')\r\n testf.write('1\\n')\r\n testf.close()\r\n\r\n self.assertRaises(JobError, job_complete, t)\r\n t.Processed = False\r\n self.assertRaises(JobError, job_complete, t)\r\n\r\n os.remove(f)", "def process_job(q):\n del log_msg[:]\n logger.info('Processing Job %s', q.id)\n\n datatype = q.datatype\n input_dir = q.input_dir\n output_dir = q.output_dir\n processor = q.processor\n if datatype.lower() == 'laz':\n block_name = proper_block_name(input_dir)\n elif datatype.lower() == 'ortho':\n block_name = proper_block_name_ortho(input_dir)\n if datatype.lower() == 'laz' or datatype.lower() == 'ortho':\n logger.info('Verifying las tiles in directory...')\n log_msg.append('Verifying las tiles in directory...\\n')\n has_error, remarks = verify_dir(input_dir, datatype.lower())\n\n if has_error:\n assign_status(q, error=True)\n log_msg.append('Error in verify_las/verify_raster!\\n {0} \\n'.format(remarks))\n else:\n logger.info('Renaming tiles...')\n\n logger.info('BLOCK NAME %s', block_name)\n log_msg.append('BLOCK NAME {0}\\n'.format(block_name))\n\n in_coverage, block_uid = find_in_coverage(block_name)\n\n #: Check first if folder or `block_name` is in `Cephgeo_LidarCoverageBlock`\n #: If not found, `output_dir` is not created and data is not processed\n if in_coverage:\n logger.info('Found in Lidar Coverage model %s %s',\n block_name, block_uid)\n log_msg.append('Found in Lidar Coverage model {0} {1}\\n'.format(\n block_name, block_uid))\n\n rename_tiles(input_dir, output_dir, processor,\n block_name, block_uid, q)\n logger.info('Status %s Status Timestamp %s',\n q.status, q.status_timestamp)\n log_msg.append('Status {0} Status Timestamp {1}\\n'.format(\n q.status, q.status_timestamp))\n\n else:\n has_error = True\n logger.error('ERROR NOT FOUND IN MODEL %s %s', block_name, block_uid)\n log_msg.append('ERROR NOT FOUND IN MODEL {0} {1}\\n'.format(block_name, block_uid))\n assign_status(q, error=True)\n # for DEM\n else:\n logger.info('Handler not implemented for type: 
%s',\n str(q.datatype))\n log_msg.append('Handler not implemented for type: {0}\\n'.format(\n str(q.datatype)))\n assign_status(q, error=True)\n\n paragraph = ''\n for par in log_msg:\n paragraph = paragraph + par\n\n #: Save log messages from renaming tiles to `Automation_AutomationJob.log`\n with PSQL_DB.atomic() as txn:\n new_q = (Automation_AutomationJob\n .update(data_processing_log=paragraph, status_timestamp=datetime.now())\n .where(Automation_AutomationJob.id == q.id))\n new_q.execute()", "def test_merge_fails_different_label(self):\n job1 = ModelJob(label=\"a-label-1\")\n job2 = ModelJob(label=\"a-label-2\")\n\n self.assertRaises(AssertionError, lambda: job1.merge(job2))", "def handle_completed_job(job, job_set, event_list):\n if not job.postvalidate():\n event_list = push_event(\n event_list,\n '{} completed but doesnt have expected output'.format(job.get_type()))\n job.status = JobStatus.FAILED\n\n if job.get_type() == 'coupled_diagnostic':\n img_dir = 'coupled_diagnostics_{casename}-obs'.format(\n casename=job.config.get('test_casename'))\n img_src = os.path.join(\n job.config.get('coupled_project_dir'),\n img_dir)\n setup_local_hosting(job, event_list, img_src)\n elif job.get_type() == 'amwg_diagnostic':\n img_dir = 'year_set_{year}{casename}-obs'.format(\n year=job.config.get('year_set'),\n casename=job.config.get('test_casename'))\n img_src = os.path.join(\n job.config.get('test_path_diag'),\n '..',\n img_dir)\n setup_local_hosting(job, event_list, img_src)\n elif job.get_type() == 'uvcmetrics':\n img_src = os.path.join(job.config.get('--outputdir'), 'amwg')\n setup_local_hosting(job, event_list, img_src)\n job_set_done = True\n for job in job_set.jobs:\n if job.status != JobStatus.COMPLETED:\n job_set_done = False\n break\n if job.status == JobStatus.FAILED:\n job_set.status = SetStatus.FAILED\n return\n if job_set_done:\n job_set.status = SetStatus.COMPLETED", "def is_last_job_failed(self):\n return self._data.get('last_job_failed')", "def handle_job_exception(job):\n exception_sink = StringIO()\n exception_sink_ref = log.add(exception_sink)\n log.exception(f'Error running job '\n f'{box2json(job)}')\n job.worker_error = exception_sink.getvalue()\n log.remove(exception_sink_ref)\n exception_sink.close()\n # TODO: Some form of retry if it's a network or other\n # transient error", "def merge_job_info(run, seqno, slices):\n inset = {\"job_info\": [\"workscript.stdout\", \"workscript.stderr\"],\n }\n outset = {\"job_info\": [\"std_{0:06d}_{1:03d}.out\", \"std_{0:06d}_{1:03d}.err\"],\n }\n tarset = {\"job_info\": \"job_info_{0:06d}_{1:03d}.tgz\",\n }\n badslices = []\n slicepatt = re.compile(r\"([1-9][0-9]*),([1-9][0-9]*)/\")\n for iset in inset:\n outlist = []\n for i in range(0, len(inset[iset])):\n ofile = outset[iset][i].format(run, seqno)\n with open(ofile, \"w\") as ostr:\n for sl in slices:\n ifile = \"{0},{1}/\".format(sl[0], sl[1]) + inset[iset][i]\n for lines in open(ifile):\n ostr.write(lines)\n outlist.append(ofile)\n tarfile = tarset[iset].format(run, seqno)\n cmd = subprocess.Popen([\"tar\", \"zcf\", tarfile] + outlist,\n stderr=subprocess.PIPE)\n elog = cmd.communicate()\n if cmd.returncode != 0:\n for eline in elog[1].decode(\"ascii\").split('\\n'):\n badslice = slicepatt.search(eline)\n if badslice:\n badslices.append(\"{0},{1}\".format(badslice.group(1),\n badslice.group(2)))\n sys.stderr.write(eline + '\\n')\n sys.stderr.write(\"Error on output file {0}\".format(tarfile) +\n \" - job logs tarballing failed!\\n\")\n sys.stderr.flush()\n continue\n odir = 
output_area + \"/\" + iset + \"/{0:06d}\".format(run)\n upload(tarfile, odir)\n return badslices", "def failed(self):\n\t\tpass", "def check_job_status():\n minutes_ago_30 = datetime.utcnow() - timedelta(minutes=30)\n minutes_ago_35 = datetime.utcnow() - timedelta(minutes=35)\n update_in_progress_jobs()\n\n jobs_not_complete_after_30_minutes = (\n Job.query.filter(\n Job.job_status == JOB_STATUS_IN_PROGRESS,\n and_(\n minutes_ago_35 < Job.updated_at,\n Job.updated_at < minutes_ago_30,\n ),\n )\n .order_by(Job.updated_at)\n .all()\n )\n\n # temporarily mark them as ERROR so that they don't get picked up by future check_job_status tasks\n # if they haven't been re-processed in time.\n job_ids: List[str] = []\n for job in jobs_not_complete_after_30_minutes:\n job.job_status = JOB_STATUS_ERROR\n dao_update_job(job)\n job_ids.append(str(job.id))\n\n if job_ids:\n notify_celery.send_task(\n name=TaskNames.PROCESS_INCOMPLETE_JOBS,\n args=(job_ids,),\n queue=QueueNames.JOBS,\n )\n raise JobIncompleteError(\"Job(s) {} have not completed.\".format(job_ids))", "def job_errors(self) -> str:\n errors = []\n\n # Get any job errors\n for job in self._jobs.values():\n if job and job.status() == JobStatus.ERROR:\n if hasattr(job, \"error_message\"):\n error_msg = job.error_message()\n else:\n error_msg = \"\"\n errors.append(f\"\\n[Job ID: {job.job_id()}]: {error_msg}\")\n\n # Get any job futures errors:\n for jid, fut in self._job_futures.items():\n if fut and fut.done() and fut.exception():\n ex = fut.exception()\n errors.append(\n f\"[Job ID: {jid}]\"\n \"\\n\".join(traceback.format_exception(type(ex), ex, ex.__traceback__))\n )\n return \"\".join(errors)", "def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))", "def testFailFiles(self):\n # Cleaning possible files already occupying the available set\n self.dummySubscription.failFiles([])\n\n # First test - Test if initial file (on available set) is inserted in the\n # failed set - no arguments\n\n dummyFile2 = File('/tmp/dummyfile2,8888', 1, 1, 1)\n # Insert dummyFile2 into the available files Set at dummySubscription\n self.dummySubscription.available.addFile(dummyFile2)\n\n S = self.dummySubscription.availableFiles()\n # Fail all files\n self.dummySubscription.failFiles(S)\n\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Second test - Test if target files are inserted at the failed set\n\n dummyFileList = []\n # Populating the dummy List with a random number of files\n for i in range(1, random.randint(100, 1000)):\n lfn = '/store/data/%s/%s/file.root' % (random.randint(1000, 9999),\n random.randint(1000, 9999))\n size = random.randint(1000, 2000)\n events = 1000\n run = random.randint(0, 2000)\n lumi = random.randint(0, 8)\n\n file = File(lfn=lfn, size=size, events=events,\n checksums={\"cksum\": \"1\"})\n file.addRun(Run(run, *[lumi]))\n dummyFileList.append(file)\n # Add the new files\n self.dummySubscription.available.addFile(dummyFileList)\n # and fail them\n self.dummySubscription.failFiles(files=dummyFileList)\n # Check there are no files available - everything should be failed\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Check if all files were inserted at subscription's failed files Set\n for x in dummyFileList:\n assert x in 
self.dummySubscription.failed.getFiles(type='set'), \\\n 'Couldn\\'t make file failed %s' % x.dict['lfn']\n\n # Third test - Test if a replicate file is erased from the other Sets,\n # when a file is considered failed\n\n dummyFile3 = File('/tmp/dummyfile3,5555', 1, 1, 1)\n dummyFileList = []\n dummyFileList.append(dummyFile3)\n\n # Inserting dummyFile3 to be used as an argument, into each of the other\n # file sets\n self.dummySubscription.acquired.addFile(dummyFile3)\n self.dummySubscription.available.addFile(dummyFile3)\n self.dummySubscription.completed.addFile(dummyFile3)\n\n # Run the method failFiles\n self.dummySubscription.failFiles(files=dummyFileList)\n\n # Check if dummyFile3 was inserted at the failed Set\n assert dummyFile3 in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Replicated file could\\'nt be inserted at failed Set'\n\n # Check if dummyFile3 was erased from all the other Sets\n assert dummyFile3 not in self.dummySubscription.acquired.getFiles(type='set'), \\\n 'Failed file still present at acquired Set'\n assert dummyFile3 not in self.dummySubscription.completed.getFiles(type='set'), \\\n 'Failed file still present at completed Set'\n assert dummyFile3 not in self.dummySubscription.available.getFiles(type='set'), \\\n 'Failed file still present at available Set'", "def _run_one_off_job_resulting_in_failure(self, query_id):\n job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()\n params = {\n 'query_id': query_id,\n }\n user_query_jobs_one_off.UserQueryOneOffJob.enqueue(\n job_id, additional_job_params=params)\n user_query_jobs_one_off.UserQueryOneOffJob.register_start(job_id)\n\n # This swap is required so that query failure email can be sent.\n with self.swap(feconf, 'CAN_SEND_EMAILS', True):\n user_query_jobs_one_off.UserQueryOneOffJob.register_failure(\n job_id, 'error')", "def fail(self, message, *args, **kwargs):\n self.counters[\"failure\"] += 1\n self._write(message.format(*args, **kwargs), FAILURE)", "def send_job_failure_email(job_id):\n mail_subject = 'Failed ML Job'\n mail_body = ((\n 'ML job %s has failed. 
For more information,'\n 'please visit the admin page at:\\n'\n 'https://www.oppia.org/admin#/jobs') % job_id)\n send_mail_to_admin(mail_subject, mail_body)\n other_recipients = (\n NOTIFICATION_EMAILS_FOR_FAILED_TASKS.value)\n system_name_email = '%s <%s>' % (\n feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)\n if other_recipients:\n email_services.send_bulk_mail(\n system_name_email, other_recipients,\n mail_subject, mail_body,\n mail_body.replace('\\n', '<br/>'))", "def jobs_validator(config_file):\n\n config = load_configuration.load_configuration(config_file)\n config = load_configuration.affix_production_tag(config, ['db_collection', 'db_production_files_collection'])\n\n database = MongoDbUtil('admin', db_server=config['db_server'], db_name=config['db_name']).database()\n\n # spawn a stats heartbeat\n stats_heartbeat = StatsHeartbeat(config['heartbeat_interval'],\n database[config['db_collection']],\n accum_stats={'completed_job': 0, 'completed_muDst': 0, 'failed_job':0, 'failed_muDst': 0, 'timeout_job': 0},\n stats={'total_in_queue': 0, 'running': 0, 'running_bfc': 0, 'pending': 0, 'completing': 0, 'unknown': 0})\n logging.info(\"Heartbeat daemon spawned\")\n\n # loop over queued jobs and update status\n files_coll = database[config['db_production_files_collection']]\n\n while True:\n\n try:\n slurm_jobs = slurm_utility.get_queued_jobs(config['slurm_user'])\n stats = {'total_in_queue': len(slurm_jobs), 'running': 0, 'running_bfc': 0, 'pending': 0, 'completing': 0, 'unknown': 0}\n\n for job in files_coll.find({'$or': [{'status': 'PENDING'}, {'status': 'RUNNING'}]}):\n\n #job is still in queue, update info\n if job['slurm_id'] in slurm_jobs:\n\n state = slurm_jobs[job['slurm_id']]\n if state == 'PENDING':\n stats['pending'] += 1\n elif state == 'RUNNING':\n stats['running'] += 1\n stats['running_bfc'] += job['number_of_cores']\n if state != job['status']:\n job['status'] = 'RUNNING'\n files_coll.update_one({'_id':job['_id']}, {'$set': job}, upsert=False)\n elif state == 'COMPLETING':\n stats['completing'] += 1\n else:\n stats['unknown'] += 1\n\n #job is out of queue, check status\n else:\n\n try:\n job_stats = slurm_utility.get_job_stats(job['slurm_id'])\n except slurm_utility.Error:\n logging.warning('Slurm is not available...')\n continue\n\n state = job_stats['state']\n if state == 'COMPLETED':\n job['status'] = 'COMPLETED'\n stats_heartbeat.accum_stats['completed_job'] += 1\n\n if not pass_qa(job):\n job['failed'] += 1\n stats_heartbeat.accum_stats['failed_muDst'] += 1\n else:\n stats_heartbeat.accum_stats['completed_muDst'] += 1\n\n job['Elapsed'] = job_stats['Elapsed']\n job['CPUTime'] = job_stats['CPUTime']\n job['CpuEff'] = job_stats['CpuEff']\n job['MaxRSS'] = job_stats['MaxRSS']\n job['MaxVMSize'] = job_stats['MaxVMSize']\n job['Reserved'] = job_stats['Reserved']\n files_coll.update_one({'_id':job['_id']}, {'$set': job}, upsert=False)\n elif state == 'FAILED':\n stats_heartbeat.accum_stats['failed_job'] += 1\n job['failed'] += 1\n job['status'] = 'FAILED'\n files_coll.update_one({'_id':job['_id']}, {'$set': job}, upsert=False)\n elif state == 'TIMEOUT':\n stats_heartbeat.accum_stats['timeout_job'] += 1\n job['failed'] += 1\n job['status'] = 'TIMEOUT'\n files_coll.update_one({'_id':job['_id']}, {'$set': job}, upsert=False)\n else:\n stats['unknown'] += 1\n\n stats_heartbeat.stats = stats\n\n except slurm_utility.Error:\n logging.warning('Slurm is not available...')\n\n time.sleep(config['recheck_sleep_interval'])", "def check_bad(self, delete_bad=True):\n # XXX: work 
out why this is needed sometimes on network filesystems.\n result_files = glob.glob(\n os.path.join(self.location, \"results\", RSLT_NM.format(\"*\"))\n )\n\n bad_ids = []\n\n for result_file in result_files:\n # load corresponding batch file to check length.\n result_num = (\n os.path.split(result_file)[-1]\n .strip(\"xyz-result-\")\n .strip(\".jbdmp\")\n )\n batch_file = os.path.join(\n self.location, \"batches\", BTCH_NM.format(result_num)\n )\n\n batch = read_from_disk(batch_file)\n\n try:\n result = read_from_disk(result_file)\n unloadable = False\n except Exception as e:\n unloadable = True\n err = e\n\n if unloadable or (len(result) != len(batch)):\n msg = \"result {} is bad\".format(result_file)\n msg += \".\" if not delete_bad else \" - deleting it.\"\n msg += \" Error was: {}\".format(err) if unloadable else \"\"\n print(msg)\n\n if delete_bad:\n os.remove(result_file)\n\n bad_ids.append(result_num)\n\n return tuple(bad_ids)", "def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e", "def test_aggregation_different_jobs(self):\n job1 = DarshanIngestedJob(label=\"jobA\", file_details={})\n job2 = DarshanIngestedJob(label=\"jobB\", file_details={})\n\n self.assertRaises(AssertionError, lambda: job1.aggregate(job2))", "def add_merge_job(dax, final_name, chunk, level, job_number, final):\n j = Job(name=\"merge.sh\")\n out_file_name = final_name + \"-%d-%d.tar.gz\" %(level, job_number)\n out_file = File(out_file_name)\n if final:\n out_file_name = final_name\n out_file = File(final_name)\n j.uses(out_file, link=Link.OUTPUT, transfer=final)\n j.addArguments(out_file)\n for f in chunk:\n flfn = File(f)\n j.uses(flfn, link=Link.INPUT)\n j.addArguments(flfn)\n j.addProfile(Profile(Namespace.CONDOR, 'request_disk', '100 GB'))\n dax.addJob(j)\n return out_file_name", "def handle_delf_init(self, job):\n\n # Check which, if any, servers the file exists on\n self.put_job_in_all_queues(job)\n list_job_results = self.get_internal_results_from_all_servers()\n if len(list_job_results) == 0:\n # There were no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n\n return_result = copy.deepcopy(list_job_results[0])\n return_result.result[\"file_exists\"] = False\n\n for result in list_job_results:\n if result.result[\"file_exists\"]:\n return_result.result[\"file_exists\"] = True\n\n return_result.result[\"outcome\"] = \"success\"\n return_result.processed_by = None\n\n self.put_external_result(return_result)", "def check_training_result_files(folder, ruleset, quiet, werror):\n\n too_many_errors = False\n result_folder = os.path.join(folder, 'results')\n for system_folder in _get_sub_folders(result_folder):\n for benchmark_folder in _get_sub_folders(system_folder):\n 
folder_parts = benchmark_folder.split('/')\n benchmark = folder_parts[-1]\n system = folder_parts[-2]\n\n # If it is not a recognized benchmark, skip further checks.\n if benchmark not in _ALLOWED_BENCHMARKS:\n print('Skipping benchmark: {}'.format(benchmark))\n continue\n\n # Find all result files for this benchmark.\n pattern = '{folder}/result_*.txt'.format(folder=benchmark_folder)\n result_files = glob.glob(pattern, recursive=True)\n\n # No result files were found. That is okay, because the organization\n # may not have submitted any results for this benchmark.\n if not result_files:\n print('No Result Files!')\n continue\n\n _print_divider_bar()\n print('System {}'.format(system))\n print('Benchmark {}'.format(benchmark))\n\n # If the organization did submit results for this benchmark, the number\n # of result files must be an exact number.\n if len(result_files) != _EXPECTED_RESULT_FILE_COUNTS[benchmark]:\n print('Expected {} runs, but detected {} runs.'.format(\n _EXPECTED_RESULT_FILE_COUNTS[benchmark],\n len(result_files)))\n\n errors_found = 0\n result_files.sort()\n for result_file in result_files:\n result_basename = os.path.basename(result_file)\n result_name, _ = os.path.splitext(result_basename)\n run = result_name.split('_')[-1]\n\n # For each result file, run the benchmark's compliance checks.\n _print_divider_bar()\n print('Run {}'.format(run))\n config_file = '{ruleset}/common.yaml'.format(\n ruleset=ruleset,\n benchmark=benchmark)\n checker = mlp_compliance.make_checker(\n ruleset=ruleset,\n quiet=quiet,\n werror=werror)\n valid, _, _, _ = mlp_compliance.main(result_file, config_file, checker)\n if not valid:\n errors_found += 1\n if errors_found == 1:\n print('WARNING: One file does not comply.')\n print('WARNING: Allowing this failure under olympic scoring rules.')\n if errors_found > 1:\n too_many_errors = True\n\n _print_divider_bar()\n if too_many_errors:\n raise Exception('Found too many errors in logging, see log above for details.')", "def handle_api_error(self, err, job_name):\n print(err)\n print('Exiting script execution.')\n self.jobs_collection.update_one({'job_name': job_name},\n {'$currentDate': {'updated': True},\n '$set': {'status': 'ERROR'}})\n exit(1)", "def test_single_error_merge(self):\n test_folder = base_path +'/test_data/merging_tests/error_test/'\n output_file = os.path.join(test_folder, \"output1.jpg\")\n\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"dummy.txt\", test_folder+\"background.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render_small.png\", test_folder+\"background.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"dummy.txt\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"background_small.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"background_large.jpg\", output_file))", "def test_invalid_n_jobs(n_jobs: Any) -> None:\n with pytest.raises(ValueError, match=r\".*Invalid n_jobs argument.*\"):\n check_n_jobs(n_jobs)", "def handle_put_error(self, err, fileobj):\n # print err\n # TODO: handle different errors accordingly\n if self.upload_attempts < 3:\n self.upload_attempts += 1\n self.wait()\n self.put_upload()\n\n result = self._construct_result_dict(fileobj, \"Failed\")\n self._results_queue.put_nowait(result)", "def 
check_analysis_pickle_files(self):\n # Make sure that there have been no more trials run since this\n # last processing. To do this, get the number of output files\n for basename in nsort(os.listdir(self.logdir)):\n m = self.labels.subdir_re.match(basename)\n if m is None or 'pckl' in basename:\n continue\n # Here is the output directory which contains the files\n subdir = os.path.join(self.logdir, basename)\n # Account for failed jobs. Get the set of file numbers that\n # exist for all h0 and h1 combinations\n self.get_set_file_nums(\n filedir=subdir\n )\n # Take one of the pickle files to see how many data\n # entries it has.\n data_sets = from_file(os.path.join(self.logdir,\n 'data_sets.pckl'))\n # Take the first data key and then the h0 fit to h0 fid\n # which should always exist. The length of this is then\n # the number of trials in the pickle files.\n if 'h0_fit_to_h0_fid' in data_sets[data_sets.keys()[0]].keys():\n pckl_trials = len(data_sets[data_sets.keys()[0]][\n 'h0_fit_to_h0_fid'].keys())\n # The number of pickle trials should match the number of\n # trials derived from the output directory.\n if self.num_trials == pckl_trials:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i trials. If '\n 'this seems incorrect please delete the files: '\n 'data_sets.pckl, all_params.pckl and labels.pckl '\n 'from the logdir you have provided.'%pckl_trials\n )\n pickle_there = True\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i trials. '\n 'However, based on the number of json files in the '\n 'output directory there should be %i trials in '\n 'these pickle files, so they will be regenerated.'%(\n pckl_trials, self.num_trials)\n )\n pickle_there = False\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script which do not seem to '\n 'contain any trials, so they will be regenerated.'\n )\n pickle_there = False\n \n return pickle_there", "def __checkAndRemoveFinished(self, running):\n with self.__queueLock:\n returnCode = running.getReturnCode()\n if returnCode != 0:\n metadataFailedRun = running.getMetadata()\n metadataToKeep = metadataFailedRun\n if metadataFailedRun is not None:\n metadataKeys = list(metadataFailedRun.keys())\n if 'jobHandler' in metadataKeys:\n metadataKeys.pop(metadataKeys.index(\"jobHandler\"))\n metadataToKeep = { keepKey: metadataFailedRun[keepKey] for keepKey in metadataKeys }\n # FIXME: The running.command was always internal now, so I removed it.\n # We should probably find a way to give more pertinent information.\n self.raiseAMessage(f\" Process Failed {running.identifier}:{running} internal returnCode {returnCode}\")\n self.__failedJobs[running.identifier]=(returnCode,copy.deepcopy(metadataToKeep))", "def mark_classifier_errored(self, classifier_id, error_message):\n classifier = self.session.query(self.Classifier).get(classifier_id)\n classifier.error_message = error_message\n classifier.status = ClassifierStatus.ERRORED\n classifier.end_time = datetime.now()\n if (self.get_number_of_hyperpartition_errors(classifier.hyperpartition_id) >\n MAX_HYPERPARTITION_ERRORS):\n self.mark_hyperpartition_errored(classifier.hyperpartition_id)", "def failure_callback(self):\n error_filename = self.run_dir / \"eplusout.err\"\n if error_filename.exists():\n with open(error_filename, \"r\") as stderr:\n stderr_r = stderr.read()\n self.exception = EnergyPlusProcessError(\n cmd=self.cmd, 
stderr=stderr_r, idf=self.idf\n )\n self.cleanup_callback()", "def test_validate_njobs():\n with pytest.raises(ValueError):\n validate_njobs(0.1)\n with pytest.raises(ValueError):\n validate_njobs(0)\n\n assert validate_njobs(1) == 1\n assert validate_njobs(2) == 2", "def validate_run_results(input_file_parameters, dir_stack):\r\n prev_command_had_output_dir = True\r\n dir_stack_index = -1\r\n command_index = 0\r\n for current_command in input_file_parameters.commands:\r\n # Skip over SPLIT commands\r\n if current_command == 'SPLIT':\r\n continue\r\n\r\n command_index += 1\r\n\r\n if prev_command_had_output_dir:\r\n dir_stack_index += 1\r\n\r\n # Keep track of number of commands created in the current workflow step\r\n number_of_successful_commands = 0\r\n\r\n # Infer command type, parameters, input and output directories\r\n command_type, command_parameters = \\\r\n utils.parse_staplefile_command_line(current_command)\r\n in_dir = dir_stack[dir_stack_index]\r\n if command_type.require_output_dir:\r\n out_dir = dir_stack[dir_stack_index+1]\r\n prev_command_had_output_dir = True\r\n else:\r\n out_dir = in_dir\r\n prev_command_had_output_dir = False\r\n\r\n # Read files until command class finds no more valid input files\r\n number_of_potential_commands = 0\r\n while True:\r\n try:\r\n # The command instance is generated without exceptions if the\r\n # command execution has failed (i.e. expected output\r\n # file does not exist). Otherwise NewFileError is raised.\r\n current_command = command_type(command_parameters, in_dir, out_dir)\r\n except STAPLERerror.NewFileExists:\r\n number_of_successful_commands += 1\r\n number_of_potential_commands += 1\r\n continue\r\n except STAPLERerror.VirtualIOError:\r\n break\r\n number_of_potential_commands += 1\r\n\r\n # Print validation results\r\n if not number_of_successful_commands:\r\n print '{0} command (step number {1}) has not been run.' \\\r\n .format(command_type.name, command_index)\r\n continue\r\n if number_of_successful_commands == number_of_potential_commands:\r\n print '{0} command (step number {1}) has been run succesfully.' 
\\\r\n .format(command_type.name, command_index)\r\n else:\r\n print '{0} command (step number {1}) workflows have failed {2}/{3} times' \\\r\n .format(command_type.name, command_index,\r\n number_of_potential_commands - number_of_successful_commands,\r\n number_of_potential_commands)", "def update_workitem_error(cases=None):\n cases = cases or WorkItem.objects.all()\n for idx, case in enumerate(cases):\n case.error_check(depth=0)\n update_task_info(state='PROGRESS', meta={'current': idx, 'total': len(cases)})", "def mark_failed(self):\r\n self.require_item()\r\n\r\n url = '{0}/mark_failed'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n\r\n return request, parsers.parse_empty", "def task_failed(self, worker_name, error):\n self.status = 'failed'\n self.modification_time = current_millis()\n self.message = '{} (worker): {}'.format(worker_name, error)\n return self", "def run(self):\n logging.info('start running job %d' % self.id)\n\n try:\n blocks = self.split_input()\n except Exception as e:\n logging.info('job %d split input error: %s' % (self.id, e.message))\n self.fail()\n return\n self.cnt_mappers = len(blocks)\n logging.info('Splitting input file done: %d blocks' % self.cnt_mappers)\n\n try:\n self.create_output_files()\n except Exception as e:\n logging.info('job %d create output files error: %s' % (self.id,\n e.message))\n self.fail()\n return\n logging.info('job %d: create input files done' % self.id)\n\n self.phase = MAP_PHASE\n self.list = TaskList(self.cnt_mappers)\n\n while True:\n if self.list.fails >= JOB_MAXIMUM_TASK_FAILURE or \\\n self.terminate_flag:\n logging.info('job %d terminated: %d tasks failed' % (self.id,\n self.list.fails))\n self.fail()\n return\n try:\n taskid = self.list.next(JOB_RUNNER_TIMEOUT)\n except:\n logging.info('job %d: map timeout! Kill all tasks' % self.id)\n self.runner.kill_all_tasks(self)\n continue\n if taskid is None:\n break\n task_conf = self.make_mapper_task_conf(taskid)\n self.runner.add_task(task_conf)\n logging.info('enqueued map task %d for job %d' % (taskid, self.id))\n\n self.phase = REDUCE_PHASE\n self.list = TaskList(self.cnt_reducers)\n\n while True:\n if self.list.fails >= JOB_MAXIMUM_TASK_FAILURE or \\\n self.terminate_flag:\n logging.info('job %d terminated: %d tasks failed' % (self.id,\n self.list.fails))\n self.fail()\n return\n try:\n taskid = self.list.next(JOB_RUNNER_TIMEOUT)\n except:\n logging.info('job %d: reduce timeout! 
Kill all tasks' % self.id)\n self.runner.kill_all_tasks(self)\n continue\n if taskid is None:\n break\n task_conf = self.make_reducer_task_conf(taskid)\n self.runner.add_task(task_conf)\n logging.info('enqueued reduce task %d for job %d' % (taskid, self.id))\n\n for fname in self.result_files:\n self.open_files.remove(fname)\n self.cleanup()\n self.runner.report_job_succeed(self.id)", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n self.assertNotIn('DB_PASS', result['configuration']['settings'])", "def test_failed_processing(self):\n # setup\n ledger_api_dialogue, fipa_dialogue = self._setup_fipa_ledger_api_dialogues(self)\n\n self.transaction_behaviour.timedout.add(ledger_api_dialogue.dialogue_label)\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.transaction_behaviour.failed_processing(ledger_api_dialogue)\n\n # after\n self.assert_quantity_in_outbox(0)\n\n # finish_processing\n assert self.transaction_behaviour.timedout == set()\n\n mock_logger.assert_any_call(\n logging.DEBUG,\n f\"Timeout dialogue in transaction processing: {ledger_api_dialogue}\",\n )\n\n # failed_processing\n assert fipa_dialogue in self.transaction_behaviour.waiting", "def end_job(self, job: 'JobAdapter',\n label: str,\n job_name: str,\n ) -> bool:\n if job.job_status[0] != 'done' or job.job_status[1]['status'] != 'done':\n try:\n job.determine_job_status() # Also downloads the output file.\n except IOError:\n if job.job_type not in ['orbitals']:\n logger.warning(f'Tried to determine status of job {job.job_name}, '\n f'but it seems like the job never ran. Re-running job.')\n self._run_a_job(job=job, label=label)\n if job_name in self.running_jobs[label]:\n self.running_jobs[label].pop(self.running_jobs[label].index(job_name))\n\n if job.job_status[1]['status'] == 'errored' and job.job_status[1]['keywords'] == ['memory']:\n original_mem = job.job_memory_gb\n if 'insufficient job memory' in job.job_status[1]['error'].lower():\n job.job_memory_gb *= 3\n logger.warning(f'Job {job.job_name} errored because of insufficient memory. '\n f'Was {original_mem} GB, rerunning job with {job.job_memory_gb} GB.')\n self._run_a_job(job=job, label=label)\n elif 'memory requested is too high' in job.job_status[1]['error'].lower():\n used_mem = None\n if 'used only' in job.job_status[1]['error']:\n used_mem = int(job.job_status[1]['error'][-2])\n logger.warning(f'Job {job.job_name} errored because the requested memory is too high. 
'\n f'Was {original_mem} GB, rerunning job with {job.job_memory_gb} GB.')\n job.job_memory_gb = used_mem * 4.5 if used_mem is not None else job.job_memory_gb * 0.5\n self._run_a_job(job=job, label=label)\n\n if not os.path.isfile(job.local_path_to_output_file) and not job.execution_type == 'incore':\n job.rename_output_file()\n if not os.path.isfile(job.local_path_to_output_file) and not job.execution_type == 'incore':\n if 'restart_due_to_file_not_found' in job.ess_trsh_methods:\n job.job_status[0] = 'errored'\n job.job_status[1]['status'] = 'errored'\n logger.warning(f'Job {job.job_name} errored because for the second time ARC did not find the output '\n f'file path {job.local_path_to_output_file}.')\n elif job.job_type not in ['orbitals']:\n job.ess_trsh_methods.append('restart_due_to_file_not_found')\n logger.warning(f'Did not find the output file of job {job.job_name} with path '\n f'{job.local_path_to_output_file}. Maybe the job never ran. Re-running job.')\n self._run_a_job(job=job, label=label)\n if job_name in self.running_jobs[label]:\n self.running_jobs[label].pop(self.running_jobs[label].index(job_name))\n return False\n\n if job.job_status[0] != 'running' and job.job_status[1]['status'] != 'running':\n if job_name in self.running_jobs[label]:\n self.running_jobs[label].pop(self.running_jobs[label].index(job_name))\n self.timer = False\n job.write_completed_job_to_csv_file()\n logger.info(f' Ending job {job_name} for {label} (run time: {job.run_time})')\n if job.job_status[0] != 'done':\n return False\n if job.job_adapter in ['gaussian', 'terachem'] and os.path.isfile(os.path.join(job.local_path, 'check.chk')) \\\n and job.job_type in ['opt', 'optfreq', 'composite']:\n check_path = os.path.join(job.local_path, 'check.chk')\n if os.path.isfile(check_path):\n if 'directed_scan' in job.job_name and 'cont' in job.directed_scan_type:\n folder_name = 'rxns' if job.is_ts else 'Species'\n r_path = os.path.join(self.project_directory, 'output', folder_name, job.species_label, 'rotors')\n if not os.path.isdir(r_path):\n os.makedirs(r_path)\n shutil.copyfile(src=check_path, dst=os.path.join(r_path, 'directed_rotor_check.chk'))\n self.species_dict[label].checkfile = os.path.join(r_path, 'directed_rotor_check.chk')\n else:\n self.species_dict[label].checkfile = check_path\n if job.job_type == 'scan' or job.directed_scan_type == 'ess':\n for rotors_dict in self.species_dict[label].rotors_dict.values():\n if rotors_dict['pivots'] in [job.pivots, job.pivots[0]]:\n rotors_dict['scan_path'] = job.local_path_to_output_file\n self.save_restart_dict()\n return True", "def handle_downloader_jobs(jobs: List[DownloaderJob]) -> None:\n queue_capacity = get_capacity_for_downloader_jobs()\n\n jobs_dispatched = 0\n for count, job in enumerate(jobs):\n if jobs_dispatched >= queue_capacity:\n logger.info(\n \"We hit the maximum downloader jobs / capacity ceiling, \"\n \"so we're not handling any more downloader jobs now.\"\n )\n return\n\n if job.num_retries < utils.MAX_NUM_RETRIES:\n if requeue_downloader_job(job):\n jobs_dispatched = jobs_dispatched + 1\n else:\n utils.handle_repeated_failure(job)", "def check_run_logs(input_file_parameters, dir_stack):\r\n # Check resource manager produced .out and .err files for assumed error\r\n # messages.\r\n print 'Checking runtime log files for error messages...'\r\n file_names = os.listdir(input_file_parameters.output_dir)\r\n\r\n newest_fix_index = 0\r\n files_to_check = []\r\n for file_name in sorted(file_names):\r\n if file_name.endswith('.out') or 
file_name.endswith('.err'):\r\n if file_name.startswith('FIX_'):\r\n current_fix_index = file_name.split('_')[1]\r\n if int(current_fix_index) > newest_fix_index:\r\n newest_fix_index = current_fix_index\r\n files_to_check = [file_name]\r\n else:\r\n files_to_check.append(file_name)\r\n if newest_fix_index == 0:\r\n files_to_check.append(file_name)\r\n\r\n if newest_fix_index > 0:\r\n print 'Workflow has been fixed {0} times. Checking only the {1} .out ' \\\r\n 'and .err files of the newest run.'.format(newest_fix_index,\r\n len(files_to_check))\r\n\r\n i = 0\r\n number_of_warnings = 0\r\n warning_strings = ['invalid', 'exception', 'warning']\r\n error_strings = ['error', 'segmentation fault', 'canceled', '(err):']\r\n skip_strings = ['adapters with at most',\r\n 'no. of allowed errors',\r\n 'error counts']\r\n for file_name in files_to_check:\r\n handle = open(os.path.join(input_file_parameters.output_dir,\r\n file_name))\r\n\r\n i += 1\r\n finish_string_exists = False\r\n warning_messages = []\r\n error_messages = []\r\n j = 0\r\n for line in handle:\r\n j += 1\r\n line = line.lower()\r\n if any(s in line for s in skip_strings):\r\n continue\r\n if any(w in line for w in warning_strings):\r\n warning_messages.append(j)\r\n number_of_warnings += 1\r\n if any(e in line for e in error_strings):\r\n error_messages.append(j)\r\n number_of_warnings += 1\r\n if 'finished at:' in line:\r\n finish_string_exists = True\r\n if os.path.splitext(file_name)[1] == '.out':\r\n if not finish_string_exists:\r\n error_messages.append('This thread has not been finished:\\n{0}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_name)))\r\n\r\n if warning_messages or error_messages:\r\n print '\\n\\nThe following file contains possible warning/error messages:'\r\n print os.path.join(input_file_parameters.output_dir, file_name)\r\n if len(warning_messages) != 0:\r\n print '\\nWarning messages on lines:'\r\n print ', '.join(map(str, warning_messages))\r\n if len(error_messages) != 0:\r\n print '\\nError messages on lines:'\r\n print ', '.join(map(str, error_messages))\r\n\r\n print '\\n\\n{0} .out and .err files checked ({1} processes)'.format(i, i/2)\r\n print 'Potential problems detected: {0}'.format(number_of_warnings)", "def files_errored_out(self) -> float:\n return pulumi.get(self, \"files_errored_out\")", "def _validate_output(cls, item):\n if item.output and item.status_code != job_models.STATUS_CODE_COMPLETED:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_OUTPUT_CHECK,\n 'Entity id %s: output: %s for job is not empty but '\n 'job status is %s' % (item.id, item.output, item.status_code))\n\n if item.output is None and (\n item.status_code == job_models.STATUS_CODE_COMPLETED):\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_OUTPUT_CHECK,\n 'Entity id %s: output for job is empty but '\n 'job status is %s' % (item.id, item.status_code))", "async def test_job_async_failed(my_job_async):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n\n my_job_async.set_on_update(on_job_update)\n\n # Submit a job which must fail.\n await my_job_async.job(mustfail=True)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job_async.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'ERROR', f'Failed job has wrong state `{job.state}`!'", "def test_bad_file_name(self):\n\n url = '/%s/jobs/%i/input_files/?file_name=%s' % 
(self.api, self.job.id, 'not_a.file')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n result = results['results']\n self.assertEqual(len(result), 0)", "def parse(self, **kwargs):\n output_filename = self.node.get_option('output_filename')\n jobname = self.node.get_option('jobname')\n if jobname is not None:\n output_filename = \"log-\" + jobname + \".yaml\"\n # Check that folder content is as expected\n files_retrieved = self.retrieved.list_object_names()\n files_expected = [output_filename]\n # Note: set(A) <= set(B) checks whether A is a subset of B\n if not set(files_expected) <= set(files_retrieved):\n self.logger.error(\"Found files '{}', expected to find '{}'\".format(\n files_retrieved, files_expected))\n return self.exit_codes.ERROR_MISSING_OUTPUT_FILES\n\n # add output file\n self.logger.info(\"Parsing '{}'\".format(output_filename))\n# print(self.retrieved._repository._get_base_folder().get_abs_path(output_filename))\n output = BigDFTLogfile(self.retrieved._repository._get_base_folder().\n get_abs_path(output_filename))\n try:\n output.store()\n except ValidationError:\n self.logger.info(\"Impossible to store LogFile - ignoring '{}'\".\n format(output_filename))\n\n# with self.retrieved.open(output_filename, 'rb') as handle:\n# output_node = SinglefileData(file=handle)\n# output_dict_aiida=orm.Dict(dict=output_dict)\n# output_dict_aiida.store()\n# output_log_aiida=BigDFTLogfile(output)\n self.out('bigdft_logfile', output)\n\n return ExitCode(0)", "def test_matching_jobs_invalid(self):\n self.assertEquals(\n self.query_api.get_matching_jobs(\n \"try\", \"146071751b1e\",\n 'Invalid buildername'), [])", "def job_step_error(self, job_request_payload, message):\n payload = JobStepErrorPayload(job_request_payload, message)\n self.send(job_request_payload.error_command, payload)", "def main():\n\n # prepare option parser\n parser = OptionParser(usage=\"usage: %prog [options] filename\", description=\"Wait until all jobs in a text file are processed.\", epilog=\"In the given file, each line beginning with a dot and a space (. ) will be executed. 
The file is modified to reflect the execution state of each job (r-running, d-done, !-failed, e-error).\")\n parser.add_option(\"-e\", \"--use-exit-status\", dest=\"use_exit_status\", default=False, action=\"store_true\", help=\"use exit status 0 only if all jobs are marked done [default: no]\")\n parser.add_option(\"-p\", \"--progress\", dest=\"progress\", default=False, action=\"store_true\", help=\"show progress while waiting [default: no]\")\n\n # parse options\n (options, args) = parser.parse_args()\n\n # get file name\n if len(args) < 1:\n print \"Need a filename (a list of jobs)\"\n print \"\"\n print parser.get_usage()\n sys.exit(1)\n fname = args[0]\n\n # process file\n\n f = open(fname, 'r+b', 0)\n\n jobs = read_jobs(f)\n states = list()\n while True:\n old_states = states\n refresh_job_states(f, jobs)\n states = list((j.state for j in jobs))\n\n count_unproc = len([\".\" for j in jobs if j.state == '.'])\n count_running = len([\".\" for j in jobs if j.state == 'r'])\n count_failed = len([\".\" for j in jobs if j.state == '!'])\n count_error = len([\".\" for j in jobs if j.state == 'e'])\n count_done = len([\".\" for j in jobs if j.state == 'd'])\n\n if states != old_states:\n if options.progress:\n bar_len = 16\n\n len_running = int(1.0 * count_running/len(jobs)*bar_len)\n len_failed = int(1.0 * count_failed /len(jobs)*bar_len)\n len_error = int(1.0 * count_error /len(jobs)*bar_len)\n len_done = int(1.0 * count_done /len(jobs)*bar_len)\n\n len_rest = bar_len - (len_running + len_failed + len_error + len_done)\n bar_print = (\"=\" * len_done) + (\"e\" * len_error) + (\"!\" * len_failed) + (\">\" * len_running) + (\" \" * len_rest)\n print \"progress: %3d of %3d jobs processed, %d errors [%s]\" % (count_failed + count_error + count_done, len(jobs), count_failed + count_error, bar_print)\n\n if count_unproc + count_running == 0:\n if options.use_exit_status and (count_done != len(jobs)):\n sys.exit(1)\n sys.exit(0)\n\n time.sleep(1)\n\n f.close()", "def test_results_error_stacktrace(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError('Shopping'))\n batch_job = BatchJob(affiliate_items, updater)\n\n with_message = 0\n for result in batch_job.run():\n with_message += (result.is_error and 'Shopping' in result.details)\n\n assert with_message == 4", "def test_group_job_fail(client):\n response = client.get('/group/group_b')\n assert response.status_code == 400", "def failed_messages(self, namespace, queue):\n failed = []\n for m in self.messages(namespace, queue):\n if m.error:\n failed.append(m)\n return failed", "def log_failures(self):\n for exception in self.queue_manager.failure_descriptions():\n self.logger.info(exception)", "def failed_replication_jobs(self, failed_replication_jobs):\n if self._configuration.client_side_validation and failed_replication_jobs is None:\n raise ValueError(\"Invalid value for `failed_replication_jobs`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n failed_replication_jobs is not None and failed_replication_jobs < 0): # noqa: E501\n raise ValueError(\"Invalid value for `failed_replication_jobs`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._failed_replication_jobs = failed_replication_jobs" ]
[ "0.64195037", "0.6388101", "0.6150792", "0.6027144", "0.60192287", "0.60131675", "0.5950463", "0.59497935", "0.5936166", "0.59313744", "0.5911836", "0.5835701", "0.5786333", "0.5782995", "0.5782858", "0.57241905", "0.57127255", "0.5671086", "0.56666774", "0.56585604", "0.5630111", "0.56151116", "0.56025076", "0.5575183", "0.55689085", "0.5553291", "0.5538929", "0.5537704", "0.5527574", "0.55024874", "0.549992", "0.54879695", "0.54860383", "0.54829216", "0.54787385", "0.5472727", "0.5471534", "0.5466337", "0.5454956", "0.54433644", "0.5439557", "0.54312104", "0.54268265", "0.5423423", "0.5406577", "0.5400804", "0.538161", "0.536696", "0.5357432", "0.5347952", "0.5321293", "0.531255", "0.5305645", "0.5303203", "0.53025067", "0.53002983", "0.5298679", "0.52929854", "0.5290797", "0.5276432", "0.52726626", "0.52668816", "0.5261851", "0.5257374", "0.52571356", "0.5247527", "0.5241637", "0.5228588", "0.52211756", "0.5220455", "0.5212635", "0.5211109", "0.5206927", "0.5193841", "0.51909053", "0.5189366", "0.518053", "0.5175711", "0.5169202", "0.51626766", "0.51533467", "0.51500523", "0.5145807", "0.51443386", "0.51442075", "0.5142767", "0.5134162", "0.51150453", "0.5109468", "0.51071864", "0.5106905", "0.5106114", "0.5103665", "0.5103573", "0.5096441", "0.509531", "0.50910753", "0.5085191", "0.5081033", "0.5077719" ]
0.7485483
0
_startComponent_ Fire up the two main threads
def startComponent(self):
    # create message service instance
    self.ms = MessageService()
    # register
    self.ms.registerAs("MergeAccountant")
    # subscribe to messages
    self.ms.subscribeTo("MergeAccountant:StartDebug")
    self.ms.subscribeTo("MergeAccountant:EndDebug")
    self.ms.subscribeTo("MergeAccountant:Enable")
    self.ms.subscribeTo("MergeAccountant:Disable")
    self.ms.subscribeTo("JobSuccess")
    self.ms.subscribeTo("GeneralJobFailure")
    self.ms.subscribeTo("MergeAccountant:SetJobCleanupFlag")
    # set trigger access for cleanup
    self.trigger = Trigger(self.ms)
    # set message service instance for PM interaction
    File.ms = self.ms
    # wait for messages
    while True:
        # get message
        messageType, payload = self.ms.get()
        self.ms.commit()
        # create session object
        Session.set_database(dbConfig)
        Session.connect()
        # start transaction
        Session.start_transaction()
        # process it
        self.__call__(messageType, payload)
        self.ms.commit()
        # commit and close session
        Session.commit_all()
        Session.close_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startComponent(self):\n self.ms = MessageService()\n self.ms.registerAs(\"TaskRegisterComponent\")\n \n self.ms.subscribeTo(\"CRAB_Cmd_Mgr:NewTask\")\n self.ms.subscribeTo(\"TaskRegisterComponent:StartDebug\")\n self.ms.subscribeTo(\"TaskRegisterComponent:EndDebug\")\n self.ms.subscribeTo(\"KillTask\")\n\n self.ms.subscribeTo(\"TaskRegisterComponent:HeartBeat\")\n self.ms.remove(\"TaskRegisterComponent:HeartBeat\")\n self.ms.publish(\"TaskRegisterComponent:HeartBeat\",\"\",self.HeartBeatDelay)\n self.ms.commit()\n\n # TaskRegister registration in WMCore.MsgService\n self.myThread = threading.currentThread()\n self.factory = WMFactory(\"msgService\", \"WMCore.MsgService.\"+ \\\n self.myThread.dialect)\n self.newMsgService = self.myThread.factory['msgService'].loadObject(\"MsgService\")\n self.myThread.transaction.begin()\n self.newMsgService.registerAs(\"TaskRegisterComponent\")\n self.myThread.transaction.commit()\n\n #\n # non blocking call event handler loop\n # this allows us to perform actions even if there are no messages\n #\n try: \n while True:\n # dispatch loop for Queued messages\n while True:\n try:\n senderId, evt, pload = self.sharedQueue.get_nowait()\n taskUniqName = pload.split(\"::\")[0]\n\n # dealloc threadId\n if senderId not in self.availWorkersIds:\n self.availWorkersIds.append(senderId)\n\n # dispatch the messages and update status \n if evt in [\"TaskRegisterComponent:NewTaskRegisteredPartially\"]:\n logging.info(\"Task %s registred Partially\"%taskUniqName) \n elif evt in [\"TaskRegisterComponent:NewTaskRegistered\"] and taskUniqName not in self.killingRequestes:\n self.ms.publish(evt, pload)\n logging.info(\"Publish Event: %s %s\" % (evt, pload))\n self.ms.commit()\n elif evt in [\"RegisterWorkerComponent:WorkerFailsToRegisterPartially\"]:\n logging.info(\"Task %s failed partially\"%taskUniqName)\n elif evt in [\"RegisterWorkerComponent:RegisterWorkerFailed\"]:\n logging.info(\"Task %s failed\"%taskUniqName)\n self.markTaskAsNotSubmitted(taskUniqName, 'all')\n self.ms.publish(evt, pload)\n logging.info(\"Publish Event: %s %s\" % (evt, pload))\n self.ms.commit()\n elif taskUniqName in self.killingRequestes:\n logging.info(\"Task %s killed by user\"%taskUniqName)\n self.markTaskAsNotSubmitted(taskUniqName, self.killingRequestes[taskUniqName])\n del self.killingRequestes[taskUniqName]\n\n except Queue.Empty, e:\n logging.debug(\"Queue empty: \" + str(e))\n break\n except Exception, exc:\n logging.error(\"'Generic' problem: \" + str(exc))\n logging.error( str(traceback.format_exc()) )\n\n if len(self.availWorkersIds) > 0:\n try:\n type, payload = self.ms.get( wait = False )\n\n if type is None:\n time.sleep( self.ms.pollTime )\n continue\n else:\n self.__call__(type, payload)\n self.ms.commit()\n except Exception, exc:\n logging.error(\"ERROR: Problem managing message...\")\n logging.error(str(exc))\n except Exception, e:\n logging.error(e)\n logging.info(traceback.format_exc())\n\n return", "def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()", "def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()", "def start(self):\n self.thread.start()", "def connect(self):\n self.start()", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def 
do_start(self, args) :\r\n if not self.wait2start:\r\n Thread(target=self.start_loop).start()\r\n self.wait2start = True\r\n else:\r\n self.__Logger.warn(\"Waiting for simulators to be ready. To force start, type \\\"forcestart\\\"\")", "def start(self):\n self._setup_thread()\n self.thread.start()", "def _start(self):", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "def start(self):\n self._thread.start()", "def sync_start(self):", "def start(self):\n self.synchronizer = SyncThread(self.api, self.sync_dir)\n self.synchronizer.start()\n self.tray.on_login()", "def run_component(self):\n raise NotImplementedError", "def main_thread_enter(self):\n ...", "def activate(self):\n self.start()", "def _start(self):\n pass", "def _start(self):\n\n _log.debug(\"Pipeline {} launching run components\".format(self.id))\n self._start_time = time.time()\n for run in self.runs:\n run.start()\n if run.sleep_after:\n time.sleep(run.sleep_after)", "def start(self):\r\n pass", "def start(self):\n ...", "def _make_thread(self):\r\n pass", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def Start(self) :\n\t\t...", "def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()", "def start(self):\n self._is_waiting = False", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def thread_side_started(self):\n self.state = self.STATE_CONNECTING\n self.send_to_gui(HelloMessage(plugin=self.plugin, thread_side=self))\n logger.debug(\"Thread side has been started\")", "def startService(self):\n super(MasterService, self).startService()\n self.dispatcher.startDispatching()", "def start(self):\n\t\tif self._send_greenlet is None:\n\t\t\tself._send_greenlet = gevent.spawn(self._send_loop)", "def start (self):\n pass", "def start (self):\n pass", "def start(self):\r\n # self._app.console.widget().clear() # clear log window\r\n # reset threads\r\n self._threads = []\r\n self.set_threads()\r\n self.ui_pump.setValue(0)\r\n self.ui_fan.setValue(0)\r\n self._motor_controllers[0].set_device(port=\"COM7\")\r\n self._labjacks[0].activate()\r\n logging.info('Starting acquisition', extra=self.extra)\r\n\r\n self.set_data() # initialize both data frame and plot selection\r\n self._signal = self._signal_selection.currentText()\r\n\r\n self._times = [0.0]\r\n self._timer.start() # start timer\r\n for thread in self._threads: # start remaining threads\r\n thread.start()\r\n self.start_rec()\r\n\r\n self.ui_start.setEnabled(False)\r\n self.ui_time.setEnabled(False)\r\n self.ui_stop.setEnabled(True)", "def start_processing(self):", "def start(self):\n\n if self.__bus_controller == None:\n print(\"can't start please pass me the needed dictionaries\")\n\n self.__global_messages = {\"kick reason\": \"\", \"free text\": \"\"}\n self.__lock_data = False\n self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.stop = False\n __main_loop = threading.Thread(target=self.__main_loop, args=(), name=\"bus updater\")\n __main_loop.start()", "def start(self):\n self._do_work.set()\n self._worker_thread.start()", "def start(self) -> None:\n ...", 
"def start(self) -> None:\n ...", "def start(self):\n self.p.start()", "async def start(self):", "async def start(self):", "def start(self) -> None:", "def start(self) -> None:", "def start():", "def start():", "def start():", "def start():", "def init(self):\n self.dispatcher.start()\n self.replyer.start()", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start_work(self):\n self.worker_thread = WorkerThread(self.feedback_log, self.job_list) # only created when processing begins. May be recreated\n self.worker_thread.daemon = True\n self.worker_thread.start()", "def __init__(self ):\r\n # ------------------- basic setup --------------------------------\r\n AppGlobal.controller = self\r\n msg = \"\"\r\n msg = ( f\"{msg}\\n=============== starting SmartTerminal ===============\" )\r\n msg = ( f\"{msg}\\n\" )\r\n msg = ( f\"{msg}\\n -----> prints may be sent to log file !\" )\r\n msg = ( f\"{msg}\\n\" )\r\n AppGlobal.logger.log( 55, msg )\r\n\r\n # AppGlobal.logger.info( \"no logger\" )\r\n # AppGlobal.logger.debug( \"no logger 2\" )\r\n\r\n self.app_name = \"SmartTerminal\"\r\n self.version = \"Ver6: 2020 02 22.0\"\r\n self.gui = None # build later\r\n self.no_restarts = -1 # start is restart 0\r\n self.no_helper_restarts = 0\r\n\r\n # ----------- for second thread -------\r\n #self.helper_thread_manager = None\r\n self.queue_to_gui = None\r\n self.queue_from_gui = None\r\n self.gui_recieve_lock = threading.Lock() # when locked the gui will process receive, acquired released in helper\r\n # how different from just a variable set?\r\n self.restart( )", "def cb_gui_test_1( self, ):\r\n print( \"cb_gui_test_1\" )\r\n self.helper_thread.toggle_lock()", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys\n self.run = asyncio.Lock()", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys\n self.run = asyncio.Lock()", "def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def lock(self):\n self.mainloop().lock()", "def run(self):\n self.submit()\n self.start()", "def start(self):\n self.__main_window = Tk()\n self.__start_loading_window()", "def started(self):", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def start():\n Networker.stop()\n Networker.Instance = Networker()", "def main(self):\n\n self._setup_task_manager()\n self._setup_source_and_destination()\n self.task_manager.blocking_start(waiting_func=self.waiting_func)\n self._cleanup()", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "async def _main(self):\n while True:\n time.sleep(1)", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def process_thread(self):", "def startMainLoop(self):\n self.thread.start()\n self.theMainLoop.emit(QtCore.SIGNAL(\"step()\"))\n self.app.exec_()", "def run(self):\n 
self.started()", "def run(self):\n self.ident = threading.current_thread().ident\n self.ready.set()\n self.exec_()", "def start(self):\n self.socket_manager.start()\n\n if self.poc != None:\n self._start_thread(self.contact_poc, daemon=True)\n self.send_discovery_message(self.poc)\n self._start_thread(self.watch_for_discovery_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_messages, daemon=True)\n self._start_thread(self.send_heartbeat_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_timeouts, daemon=True)\n self._start_thread(self.watch_for_rtt_messages, daemon=True)\n self._start_thread(self.calculate_rtt_timer, daemon=True)\n self._start_thread(self.watch_for_app_messages, daemon=True)\n\n while True: # Blocking. Nothing can go below this\n self.check_for_inactivity()", "def _start_comms_thread(self):\n\n self.commthread = threading.Thread(target=self.__comms_thread_body, daemon=True)\n self.__comm_term = False\n self.__comm_exc = None\n\n self.commthread.start()\n\n self.__comm_term = False", "def _init_threads(self):\n\n startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))\n self.threads.append(startTh)\n\n sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))\n self.threads.append(sendTh)", "def start(self):\r\n if self._ready:\r\n return\r\n\r\n self._start()\r\n self._ready = True", "def start(self):\n self.active = True", "def start(self):\n if not self.thread:\n self.thread = WndUtils.run_thread(None, self.save_data_thread, ())", "def start(self):\n if self.__started:\n return\n\n self.__started = True\n GLib.timeout_add(GtkMainLoop.DEADLINE_GLIB, self.__ioloop_run)\n self.__gi_loop.run()", "def start(self):\n self.j_pump.start()\n return self", "def _bg_thread_main(self) -> None:\n while not self._done:\n self._run_server_cycle()", "def HACore():\r\n logging.info('HomeAutomationCore initialized')\r\n threadlist = ThreadList()\r\n sharedqueue = QueueList()\r\n\r\n modules = LoadModulesFromTuple(INSTALLED_APPS)\r\n logging.debug('Loading modules:')\r\n # create threads and so on\r\n for mod in modules:\r\n logging.info(mod)\r\n mt = None\r\n if issubclass(modules[mod].cls, HAWebService): # TODO: too closely coupled\r\n mt = modules[mod].cls(name=mod, callback_function=None, queue=sharedqueue, threadlist=threadlist, modules=modules)\r\n elif issubclass(modules[mod].cls, HomeAutomationQueueThread):\r\n mt = modules[mod].cls(name=mod, callback_function=None, queue=sharedqueue, threadlist=threadlist)\r\n elif issubclass(modules[mod].cls, LEDMatrixBase):\r\n pass # leave these to be created within LEDMatrixCore\r\n else: # assume its the level below (no queue)\r\n logging.debug('Instantiating module ' + mod)\r\n mt = modules[mod].cls(name=mod, callback_function=None)\r\n\r\n if mt != None:\r\n if issubclass(modules[mod].cls, HAWebService):\r\n mt.daemon = True\r\n threadlist.append(mt)\r\n\r\n logging.debug('Starting up module threads')\r\n for ti in threadlist:\r\n ti.start() # start all threads at this point\r\n\r\n timecheck = time.time()\r\n while 1:\r\n # main loop that handles queue and threads, and through executing queue item changes the state of the statemachine\r\n try:\r\n for remote_module in REMOTE_APPS:\r\n remote_addr = remote_module['Address']\r\n remote_apps = remote_module['INSTALLED_APPS']\r\n if not 'socketclient' in remote_module.keys():\r\n remote_module['socketclient'] = LEDMatrixSocketClient(remote_addr) #cache\r\n\r\n for item in 
[i for i in sharedqueue if i.cls in remote_apps]:\r\n logging.info('Sending queue item to remote host: ' + str(remote_module) )\r\n remote_module['socketclient'].SendSerializedQueueItem(item.__str__())\r\n sharedqueue.remove(item)\r\n time.sleep(0.1)\r\n\r\n if time.time() - timecheck > 10:\r\n timecheck = time.time()\r\n logging.info('10s mainloop interval, number of threads: %d (%s), queue items: %d' %\r\n ( len(threadlist), ', '.join([str(i) for i in threadlist]), len(sharedqueue) ) )\r\n for _thread in threadlist:\r\n if not _thread.isAlive():\r\n logging.info('Removing dead thread: ' + _thread.name)\r\n threadlist.remove(_thread)\r\n # TODO: call other module cleanup (e.g. remove instance ref in webservice globals)\r\n # webservice_state_instances\r\n # and webservice_class_instances\r\n\r\n except KeyboardInterrupt:\r\n logging.info('Detected ctrl+c, exiting main loop and stopping all threads')\r\n break\r\n except:\r\n logging.critical(\"Unexpected error in main loop (exiting): \" + traceback.format_exc() )\r\n break\r\n\r\n logging.debug('Stopping all threads')\r\n for _thread in threadlist:\r\n _thread.stop_event.set() # telling the threads to stop\r", "def start_non_blocking(self):\n self._start_thread(self.start, daemon=True)", "def start_workunit(self, workunit):\r\n pass", "def start_workunit(self, workunit):\r\n pass", "def start(self, _):\n logger.debug(\"Spawning metric & span reporting threads\")\n self.should_threads_shutdown.clear()\n self.sensor.start()\n instana.singletons.tracer.recorder.start()", "def create_and_start_threads(self):\r\n self.create_threads()\r\n self.start_threads()", "def on_button_start(self, event):\n self.mouse_thread = MyThread()\n self.displayLbl.SetLabel(\"Thread started!\")\n #btnStart = event.GetEventObject()\n #btnStart.Disable()\n self.btnStart.Disable()", "def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)", "def start(self):\n self.microblaze.run()\n self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)\n self.load_switch_config(self.iop_switch_config)", "def start(self):\n self.open()\n #t = Thread(target=self._cache_update, args=())\n #t.daemon = True\n #t.start()", "async def __aenter__(self):\n await self.start()", "def start(self) -> None:\n self.__enter__()", "def do_start(self, *arg):\n self._keep_looping = True\n\n print_info(\"Starting sensors\")\n\n self._loop()" ]
[ "0.7069816", "0.65574837", "0.65574837", "0.6347645", "0.6330432", "0.6329626", "0.6314845", "0.62738216", "0.6261208", "0.62377006", "0.6209128", "0.6199045", "0.61689764", "0.61576235", "0.6155671", "0.61154896", "0.61113256", "0.6080329", "0.60750294", "0.60603696", "0.60528016", "0.60521626", "0.60521626", "0.602777", "0.6015601", "0.60142374", "0.6002914", "0.60006815", "0.600037", "0.59962934", "0.599444", "0.5993969", "0.5993969", "0.59832984", "0.5980234", "0.597371", "0.59699535", "0.5948535", "0.5948535", "0.5946651", "0.59395754", "0.59395754", "0.5933085", "0.5933085", "0.592605", "0.592605", "0.592605", "0.592605", "0.59214205", "0.5904611", "0.5904611", "0.5904611", "0.5904611", "0.5904611", "0.5904611", "0.5904611", "0.5904611", "0.59013116", "0.590061", "0.589108", "0.5875722", "0.5875722", "0.586774", "0.5854744", "0.5848847", "0.5846845", "0.5846465", "0.5839374", "0.5821439", "0.58212715", "0.58176255", "0.5809844", "0.5804134", "0.57991105", "0.5797706", "0.57909966", "0.5788781", "0.5786758", "0.5777307", "0.5773467", "0.5768187", "0.57607627", "0.57552385", "0.5746307", "0.57462585", "0.5743573", "0.5737622", "0.57259685", "0.5721497", "0.5720223", "0.5720223", "0.57188404", "0.56939346", "0.56900084", "0.5689169", "0.5686653", "0.5684856", "0.56822777", "0.56754225", "0.5663787" ]
0.6306281
7
_getVersionInfo_ return version information
def getVersionInfo(cls):
    return __version__ + "\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def get_version_info() -> Tuple[Text, Text]:", "def version_info(self):\n\n return __version_info__", "def info(self):\n version_str = self.version\n return Utils.version_str2tuple(version_str)", "def version_info():\r\n return tuple(map(int, __version__.split('.')))", "def get_version():\n return about.get_version()", "def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']", "def _get_version(self):", "def get_version_info(self):\n sys_info_service = self.robot.all_services.get(\"sys_info\")\n if sys_info_service is not None:\n log.info(\"System version info: %s\" % sys_info_service.system_version)\n else:\n log.warning(\"Service get_version_info is not enabled!\")", "def get_version():\n return '%d.%d.%d' % version_info", "def pyzmq_version_info():\n return version_info", "def read_versionInfo(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.versionInfo_read) ENABLED START #\n return self.attr_map[\"versionInfo\"]\n # PROTECTED REGION END # // SdpMasterLeafNode.versionInfo_read", "def get_version_info():\n from docplex.cp.model import CpoModel\n try:\n with CpoSolver(CpoModel()) as slvr:\n return slvr.agent.version_info\n except:\n if config.context.log_exceptions:\n traceback.print_exc()\n pass\n return {}", "def _GetVersionInformation(self):\n version_information_resource = self._GetVersionInformationResource()\n if not version_information_resource:\n return\n\n file_version = version_information_resource.file_version\n major_version = (file_version >> 48) & 0xffff\n minor_version = (file_version >> 32) & 0xffff\n build_number = (file_version >> 16) & 0xffff\n revision_number = file_version & 0xffff\n\n self._file_version = (\n f'{major_version:d}.{minor_version:d}.{build_number:d}.'\n f'{revision_number:d}')\n\n product_version = version_information_resource.product_version\n major_version = (product_version >> 48) & 0xffff\n minor_version = (product_version >> 32) & 0xffff\n build_number = (product_version >> 16) & 0xffff\n revision_number = product_version & 0xffff\n\n self._product_version = (\n f'{major_version:d}.{minor_version:d}.{build_number:d}.'\n f'{revision_number:d}')\n\n if file_version != product_version:\n logging.warning((\n f'Mismatch between file version: {self._file_version:s} and product '\n f'version: {self._product_version:s} in message file: '\n f'{self.windows_path:s}.'))", "def get_version(self):\n pass", "def get_server_info(self):\n raise NotImplementedError('Database.get_version()')", "def test_versionInfo(self):\n self.assertEqual(\n nevow.__version_info__,\n (nevow.version.major, nevow.version.minor, nevow.version.micro))", "def get_version_info(self, key_name='ver_sw_release'):\n if key_name in self._msg_info_dict:\n val = self._msg_info_dict[key_name]\n return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)\n return None", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def get_version_info(self):\n\n try:\n nt_header = self.get_nt_header()\n except ValueError, ve:\n return obj.NoneObject(\"PE file failed initial sanity checks: {0}\".format(ve))\n\n try:\n unsafe = self.obj_vm.get_config().UNSAFE\n except AttributeError:\n unsafe = False\n\n for sect in nt_header.get_sections(unsafe):\n if str(sect.Name) == '.rsrc':\n root = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", self.obj_offset + 
sect.VirtualAddress, self.obj_vm)\n for rname, rentry, rdata in root.get_entries():\n # We're a VERSION resource and we have subelements\n if rname == resource_types['RT_VERSION'] and rentry:\n for sname, sentry, sdata in rdata.get_entries():\n # We're the single sub element of the VERSION\n if sname == 1 and sentry:\n # Get the string tables\n for _stname, stentry, stdata in sdata.get_entries():\n if not stentry:\n return obj.Object(\"_VS_VERSION_INFO\", offset = (stdata.DataOffset + self.obj_offset), vm = self.obj_vm)\n\n return obj.NoneObject(\"Cannot find a _VS_VERSION_INFO structure\")", "def get_version():\n return 1", "def rpc_version(self):\n\t\tvinfo = {'version': version.version, 'version_info': version.version_info._asdict()}\n\t\tvinfo['rpc_api_version'] = version.rpc_api_version\n\t\treturn vinfo", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)", "def do_get_version(self, arg):\n arg = arg\n print(self.phil.if_version)", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def version():\n\n pass", "def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')", "def version():\n\n print(VERSION_CODE)", "def Version(self) -> _n_0_t_12:", "def Version(self) -> _n_0_t_12:", "def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value", "def GetVersion(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetVersion(self)", "def version(self):\r\n return self._get_version(self.java)", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def get_version():\r\n return __version__", "def version(self) -> Dict[str, str]:\n return self.get_version()", "def getVersion(self, *args):\n return _libsbml.CompExtension_getVersion(self, *args)", "def version(self):\n info = json.loads(self.get_info())\n return FapiInfo(info).version", "def test_get_version(self):\n pass", "def version(self):", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)", "def get_version(self, params):\n return self.version", "def get_version():\n click.echo(get_current_version_number())", "def GetVersion(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetVersion(self)", "def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version 
from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)", "def getVersion(cls):\n cVersion = cls.__getLib().voikkoGetVersion()\n return unicode_str(cVersion, \"UTF-8\")", "def version(self):\n return self._get(\"version\")", "def getVersion(self):\n return _libsbml.SBase_getVersion(self)", "def method_get_version(self) -> str: # pragma: nocover\n raise NotImplementedError", "def getversion(self):\n return self.__version", "def version():\n return __VERSION__", "def getVersion(self, *args):\n return _libsbml.FbcExtension_getVersion(self, *args)", "def get_version(self):\n return self.cur_config['version']['name']", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def info(self, usecache=1):\r\n info = usecache and cache.info.get(self)\r\n if not info:\r\n try:\r\n output = self._svn('info')\r\n except py.process.cmdexec.Error, e:\r\n if e.err.find('Path is not a working copy directory') != -1:\r\n raise py.error.ENOENT(self, e.err)\r\n elif e.err.find(\"is not under version control\") != -1:\r\n raise py.error.ENOENT(self, e.err)\r\n raise\r\n # XXX SVN 1.3 has output on stderr instead of stdout (while it does\r\n # return 0!), so a bit nasty, but we assume no output is output\r\n # to stderr...\r\n if (output.strip() == '' or \r\n output.lower().find('not a versioned resource') != -1):\r\n raise py.error.ENOENT(self, output)\r\n info = InfoSvnWCCommand(output)\r\n\r\n # Can't reliably compare on Windows without access to win32api\r\n if py.std.sys.platform != 'win32': \r\n if info.path != self.localpath: \r\n raise py.error.ENOENT(self, \"not a versioned resource:\" + \r\n \" %s != %s\" % (info.path, self.localpath)) \r\n cache.info[self] = info\r\n self.rev = info.rev\r\n return info", "def get_version(self):\n return self.version", "def version(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = (\n self.about.get(\"Version\")\n or self.about.get(\"Installed Version\")\n or \"DEMO\"\n )\n data = data.replace(\"_\", \".\")\n return data", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def Hello(self):\n version = '1.5.3'\n print 'returned version number', version\n return version", "def get_version(self):\n return self.api_version", "def 
GetVersion(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_GetVersion(self)", "def getVersion(self):\n return _libsbml.SBasePlugin_getVersion(self)", "def get_version(self):\n return 0", "def getInfo():", "def get_version(self):\n verxml = self._ncc.nxoscli('show version')\n self.logger.debug(verxml)\n verparsed = _begin_parse(verxml)\n sysmgrclischema = parse_get_nsmap(verparsed)\n self.logger.debug(\"NSMAP: {}\".format(sysmgrclischema))\n showversion = find_element(['sys_ver_str', 'chassis_id', 'host_name', 'loader_ver_str'], sysmgrclischema,\n verparsed)\n self.logger.debug(str(showversion))\n self.hostname = showversion['host_name']\n self.chassis_id = showversion['chassis_id']\n self.system_version = showversion['sys_ver_str']", "def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)", "def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out", "def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def info(self, zolo, module, args):\n print(f\"[Other] Version {module.version}\")", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def version(self):\n version = self.get_rpc().getnetworkinfo()[\"subversion\"]\n version = version.replace(\"/\", \"\").replace(\"Satoshi:\", \"v\")\n return version", "def getVersion(self, *args):\n return _libsbml.MultiExtension_getVersion(self, *args)", "def getInfo(self, formatted=False):\n\n\t\tinfo = {}\n\t\tinfo['Python'] = \"%d.%d.%d\" %(sys.version_info[0], sys.version_info[1], sys.version_info[2])\n\t\tinfo[__binding__] = __binding_version__\n\t\tinfo['Qt'] = QtCore.qVersion()\n\t\tinfo['OS'] = platform.system()\n\t\tinfo['Environment'] = HOST\n\n\t\tif formatted:\n\t\t\tinfo_ls = []\n\t\t\tfor key, value in 
info.items():\n\t\t\t\tinfo_ls.append(\"{} {}\".format(key, value))\n\t\t\tinfo_str = \" | \".join(info_ls)\n\t\t\treturn info_str\n\n\t\telse:\n\t\t\treturn info", "def version(self):\n return self.rpc.call(MsfRpcMethod.CoreVersion)", "def getVersion(self, *args):\n return _libsbml.SBMLExtension_getVersion(self, *args)", "def formver(self) -> Tuple[int]:\n return (self.header.format, self.header.version)", "def printVersionInfo():\n #pass\n pathname = sys.argv[0]\n myMtime = os.stat(pathname)[ST_MTIME]\n modDate = CONFIG['utils'].mktime(myMtime)\n logIt(\"Python Script: \" + pathname + \"\\n\")\n logIt(\"Version Date: \" + modDate + \"\\n\")", "def version():\n return uname().version", "def version():\n return uname().version", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self):\n return version.__version__", "def get_version(self):\n return version.__version__", "def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()", "def GetVersion(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_GetVersion(self)", "def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"", "def info() -> None:", "def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def version_number() -> int:\n return 0", "def get_version(self):\n return self._version", "def get_version(self):\n return self._version", "def svn_client_info(char_path_or_url, svn_opt_revision_t_peg_revision, svn_opt_revision_t_revision, svn_info_receiver_t_receiver, svn_boolean_t_recurse, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass" ]
[ "0.80382955", "0.7959786", "0.78665364", "0.772043", "0.76289654", "0.7546513", "0.7518231", "0.7508608", "0.74194604", "0.72860444", "0.72014874", "0.7074777", "0.7063996", "0.7035926", "0.7033924", "0.699559", "0.6988741", "0.69872457", "0.69825757", "0.69360656", "0.6931772", "0.6914961", "0.6866997", "0.6850112", "0.6835878", "0.68352205", "0.6833622", "0.67676777", "0.6748516", "0.6748366", "0.6746036", "0.6746036", "0.67258334", "0.6723947", "0.67135096", "0.67113525", "0.6710127", "0.6703621", "0.66867715", "0.6685003", "0.6682768", "0.6672434", "0.6670052", "0.66507936", "0.665008", "0.66400665", "0.66349226", "0.66296035", "0.65952384", "0.6587197", "0.6585328", "0.6578513", "0.65676427", "0.65641046", "0.6561693", "0.654653", "0.6543412", "0.6542767", "0.65413535", "0.6536098", "0.6517943", "0.6517943", "0.6517943", "0.6507712", "0.65020555", "0.6501504", "0.6498659", "0.647493", "0.646014", "0.6450444", "0.6445814", "0.64198184", "0.6408692", "0.6392047", "0.6392047", "0.6360444", "0.63529664", "0.6351732", "0.6348439", "0.63370943", "0.6331463", "0.6328505", "0.6327954", "0.63223875", "0.6320945", "0.63209265", "0.63191295", "0.63191295", "0.6318657", "0.6316356", "0.6316356", "0.6316043", "0.63100517", "0.63069415", "0.6302712", "0.62937576", "0.62788063", "0.6275819", "0.6275819", "0.62702644" ]
0.80043864
1
Make sure that the marks are correctly set
def test_marks(data, request): current_marks = get_pytest_marks_on_item(request._pyfuncitem) assert len(current_marks) == 1 if data == 1: assert current_marks[0].name == "fast" elif data == 2: assert current_marks[0].name == "slow" else: raise AssertionError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check4(self, x, y, mark, d = 0):", "def check3(self, x, y, mark, d = 0):", "def assign_mark(entry: StudentEntry):\n pass", "def test_correct_init_multiple_markers(self):\n subject = self._get_test_subject(markers=MULTIPLE_MARKERS)\n\n self.assertEqual(len(subject.signal_map), len(MULTIPLE_MARKERS))\n self.assertEqual(type(subject.marker('monkey_bum')), Marker)\n self.assertEqual(subject.marker('monkey_bum'), MULTIPLE_MARKERS[0])\n\n self.assertEqual(subject.previous_marker, None)\n self.assertEqual(subject.current_marker, MULTIPLE_MARKERS[0])\n self.assertEqual(subject.next_marker, MULTIPLE_MARKERS[1])", "def clean_marks(si):\n marks_in_conts, marks_in_bkg = None, None\n conts_detail, total_mark = None, None\n doNothing = False\n conts_detail = re.sub(r'\\W+', '', si.get('containers_detail', ''))\n total_marks = re.sub(r'\\W+', '', si.get('total_mark', ''))\n bkg_no = re.sub(r'\\W+', '', si.get('bkg_no', ''))\n\n if conts_detail == total_marks and total_marks != '':\n raws = si['total_mark']\n for raw_line in raws.split('\\n'):\n line = re.sub(r'\\W+', '', raw_line).upper()\n if 'MARK' in line:\n marks_in_conts = raw_line\n break\n\n if marks_in_conts is not None:\n conts_detail = raws.split(marks_in_conts)[0]\n total_mark = raws.split(marks_in_conts)[1]\n bkg_no = si.get('bkg_no', '')\n else:\n doNothing = True\n\n elif bkg_no == total_marks and total_marks != '':\n raws = si['total_mark']\n for raw_line in raws.split('\\n'):\n line = re.sub(r'\\W+', '', raw_line).upper()\n if 'BOOKINGNO' in line:\n bkg_no = line.replace('BOOKINGNO', '')\n if len(bkg_no) == 12 and bkg_no.isalnum():\n marks_in_bkg = raw_line\n break\n if marks_in_bkg is not None:\n total_mark = raws.split(marks_in_bkg)[0]\n conts_detail = si.get('containers_detail', '')\n else:\n doNothing = True\n else:\n doNothing = True\n\n return doNothing, conts_detail, total_mark, bkg_no", "def pruneMarks(self):\n self.__prune_marks(self.nodes(data=True))", "def updatemark(self):\n trans = {'A':'1','1':'1','C':'2','2':'2','G':'3','3':'3','T':'4','4':'4'}\n lmark = self.mark['marklist']\n for mark in lmark:\n self.mark[mark]['a1'] = trans.get(self.mark[mark]['a1'],'0')\n self.mark[mark]['a2'] = trans.get(self.mark[mark]['a2'],'0')", "def __post_init__(self) -> None:\n self.annotate = self.x_y[0] == 0 or self.x_y[1] == 0", "def test_check_input_with_gephi_mark(self):\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.check_input_with_gephi()\n edges_dict = F.edges_df.to_dict()\n\n mark_dict = {\n 1: '2',\n 2: '3',\n 3: '4',\n 4: '5',\n 5: '6',\n 6: '6',\n 7: '7',\n 8: '8',\n 9: '6',\n 10: '9',\n 11: '9',\n 13: '16',\n 14: '16',\n 15: '17',\n 16: '10',\n 17: '11',\n 18: '11',\n 19: '19',\n 20: '19',\n 21: '19',\n 22: '12',\n 23: '12',\n 24: '13',\n 25: '13',\n 26: '14',\n 27: '14',\n 28: '18'\n }\n\n self.assertDictEqual(\n mark_dict,\n edges_dict['mark'],\n msg=\"MARK failure: check_input_with_gephi function\")", "def test_check_input_with_gephi_mark(self):\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.check_input_with_gephi()\n edges_dict = F.edges_df.to_dict()\n\n mark_dict = {\n 1: '2',\n 2: '3',\n 3: '4',\n 4: '5',\n 5: '6',\n 6: '6',\n 7: '7',\n 8: '8',\n 9: '6',\n 10: '9',\n 11: '9',\n 13: '16',\n 14: '16',\n 15: '17',\n 16: '10',\n 17: '11',\n 18: '11',\n 19: '19',\n 20: '19',\n 21: '19',\n 22: '12',\n 23: '12',\n 24: '13',\n 25: '13',\n 26: '14',\n 27: '14',\n 28: '18'\n }\n\n self.assertDictEqual(\n mark_dict,\n edges_dict['mark'],\n msg=\"MARK failure: check_input_with_gephi function\")", "def 
test_marking_duplication(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n amber_marking = generate_marking_spec(generate_amber_marking_struct())\n\n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n\n indicator = Indicator(title=\"Test\")\n incident.related_indicators.append(indicator)\n\n container.add_marking(incident, red_marking, descendants=True)\n container.add_global(amber_marking)\n\n self.assertTrue(container.is_marked(indicator, red_marking))\n self.assertTrue(container.is_marked(indicator, amber_marking))\n\n markings = container.get_markings(indicator)\n self.assertEqual(len(markings), 2)", "def test_correct_init(self):\n subject = self._get_test_subject(markers=[SINGLE_MARKER])\n\n self.assertEqual(len(subject.signal_map), 1)\n self.assertEqual(type(subject.marker('monkey_nuts')), Marker)\n self.assertEqual(subject.marker('monkey_nuts'), SINGLE_MARKER)\n\n self.assertEqual(subject.previous_marker, None)\n self.assertEqual(subject.current_marker, SINGLE_MARKER)\n self.assertEqual(subject.next_marker, None)", "def set_mark( self, mark, index ):\n\n try:\n int(self.__grid[index-1])\n\n if mark.lower() == 'x' or mark.lower() == 'o': \n self.__grid[index-1] = mark\n\n return 1\n\n except ValueError:\n return 0", "def onMarkChange(self, dataName, value, msg):\r\n print str(value)\r\n if (len(value) != 0):\r\n print \"We detected naomarks !\"", "def test_incorrect_init(self):\n with self.assertRaises(MissingMarkersException) as context:\n self.subject()", "def test_observable_marking(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n observable = generate_observable()\n package.add_observable(observable)\n container.add_marking(observable, red_marking)\n \n self.assertTrue(container.is_marked(observable, red_marking))", "def test_marking_removal(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n container.add_marking(indicator, red_marking)\n self.assertTrue(container.is_marked(indicator, red_marking))\n\n container.remove_marking(indicator, red_marking)\n self.assertFalse(container.is_marked(indicator, red_marking))", "def set_square(self, x, y, mark):\n if self.board[x][y] == 0:\n self.board[x][y] = mark\n return True\n else:\n return False", "def print_marks(self):\n\t\tSYMBOLS = {CLOSED: \".\", FLAG: \"x\", BOOM: \"#\", CLEAR: \" \"}\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tm = self.marks[x][y]\n\t\t\t\tprint(SYMBOLS.get(m, m), end=\"\")\n\t\t\tprint(\"\")", "def test_absent_marking_removal_failure(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n self.assertRaises(errors.MarkingNotFoundError, container.remove_marking, indicator, red_marking)\n \n observable = generate_observable()\n package.add_observable(observable)\n self.assertRaises(errors.MarkingNotFoundError, container.remove_marking, observable, red_marking)", "def _handleMarkernoChangedSave(self):\n \n # this is a NEW Place \n if not self.id:\n # we are going to only focus on Places with territoryno\n if not self.territoryno:\n return\n \n self.max_markerno = 
self.maxTerritoryMarkerno(self.territoryno)\n self.countTerritoryPlaces = self.countTerritoryPlaces(self.territoryno)\n \n # if markerno NOT was set\n if not self.markerno:\n self.markerno = _getNextTerritoryPlaceMarkerno()\n \n self._handleMarkernoChanged()", "def test_component_marking(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n container.add_marking(indicator, red_marking, descendants=True)\n \n self.assertTrue(container.is_marked(indicator, red_marking))", "def test_package_marking_error(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n\n self.assertRaises(errors.UnmarkableError, container.add_marking, package, red_marking)", "def MarkerDefineDefault(self):\n #self.MarkerDefine(TextView.MARKER_NUM, wx.stc.STC_MARK_ROUNDRECT, wx.BLACK, wx.BLUE)\n pass", "def check_spacers(\n raw_signal_array,\n set_of_spacer_marks\n ):\n \n temp =[i for i, state in enumerate(raw_signal_array)\n if re.search('^\\|$', state)]\n # Build the set of spacers for uniformity in rendering.\n # if a space is missedin Excel, it will be forced on\n # the wave.\n if any((set_of_spacer_marks - set(temp))):\n logging.warning('{1} Possible missing spacers, Wave will be overwritten with spacers at columns-{0}'.format(sorted(set_of_spacer_marks), raw_signal_array[0]))\n set_of_spacer_marks = set_of_spacer_marks | set(temp)\n return set_of_spacer_marks", "def markCell(self, mark, i, j):\n if 0 <= i < len(self.board) and 0 <= j < len(self.board) and self.empty_cells > 0:\n if self.board[i][j] != \" \":\n return False\n else:\n self.board[i][j] = mark\n self.empty_cells -= 1\n return True\n else:\n return False", "def add_marks(self, *marks):\n return Mgn(self.genus, self.marks.union(marks))", "def mark(self, mark):\n\n self._mark = mark", "def reset(self):\n debug('resetting')\n self.marked = False", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def assert_goodness(self):\n if self._setted:\n self.assert_stored_iss()\n self.assert_stored_ks()\n ## Check idxs\n self.assert_stored_idxs()\n ## Check sp_relative_pos\n self.assert_stored_sp_rel_pos()", "def test_grading_exception(self):\r\n all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)\r\n student1, student2, student3, student4, student5 = self.students\r\n self.assertEqual(\r\n all_errors,\r\n {\r\n student3: \"I don't like student3\",\r\n student4: \"I don't like student4\"\r\n }\r\n )\r\n\r\n # But we should still have five gradesets\r\n self.assertEqual(len(all_gradesets), 5)\r\n\r\n # Even though two will simply be empty\r\n self.assertFalse(all_gradesets[student3])\r\n self.assertFalse(all_gradesets[student4])\r\n\r\n # The rest will have grade information in them\r\n self.assertTrue(all_gradesets[student1])\r\n self.assertTrue(all_gradesets[student2])\r\n self.assertTrue(all_gradesets[student5])", "def test_observable_marking_removal(self):\n container = stixmarx.new()\n package = container.package\n red_marking = 
generate_marking_spec(generate_red_marking_struct())\n \n observable = generate_observable()\n package.add_observable(observable)\n \n container.add_marking(observable, red_marking)\n\n self.assertTrue(container.is_marked(observable, red_marking))\n container.remove_marking(observable, red_marking)\n self.assertFalse(container.is_marked(observable, red_marking))", "def mark(self): # type: () -> None\n self._marker = self._idx", "def _validate_annotations(self):\n for i, (k, v) in enumerate(self._annotations_dict.items()):\n for index, annotation in enumerate(v):\n startOffset = int(annotation['startOffset'])\n endOffset = int(annotation['endOffset'])\n tweet = self._tweets_dict[k]\n annotatedText = annotation['annotatedText']\n\n realOffset = tweet.find(annotatedText)\n if realOffset != startOffset:\n #print(\"Fixing startOffset for {}. (annotated at position {}, but should be at {})\".format(k, startOffset, realOffset))\n\n diff = realOffset - startOffset\n annotation['startOffset'] = \"{}\".format(startOffset+diff)\n annotation['endOffset'] = \"{}\".format(endOffset+diff)", "def recheckPosition(self):\n self.start = self.bounds[0].pos\n self.end = self.bounds[1].pos", "def mark_item(report):\n if report['pass'] == True:\n report['mark'] = 1 \n else:\n report['mark'] = 0 \n \n report['mark-max'] = 1 \n \n return report['mark'], report['mark-max']", "def save_drawing_score(self, user, quiz, marks):\n\n obj = UserQuizMark.objects.create(user=user, quiz=quiz, marks=marks*10)\n return obj", "def test_embedded_observable_direct_marking(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n amber_marking = generate_marking_spec(generate_amber_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n observable = generate_observable()\n indicator.add_observable(observable)\n \n container.add_marking(indicator, amber_marking)\n container.add_marking(observable, red_marking)\n \n self.assertTrue(container.is_marked(observable, red_marking))", "def check_format_of_annotation_in_file(self):\n if not self.is_span_valid():\n sys.exit()", "def get_pose_marks(self, marks):\n pose_marks = []\n pose_marks.append(marks[30]) # Nose tip\n pose_marks.append(marks[8]) # Chin\n pose_marks.append(marks[36]) # Left eye left corner\n pose_marks.append(marks[45]) # Right eye right corner\n pose_marks.append(marks[48]) # Mouth left corner\n pose_marks.append(marks[54]) # Mouth right corner\n return pose_marks", "def get_marks(self):\n if not hasattr(self, '_BasePublication__marks_cache'):\n tree_opts = Rubric._mptt_meta\n self.__marks_cache = PublicationCharacteristicOrMarkSet(\n self.get_active_rubrics_for_marks(),\n self.get_additional_marks(),\n Rubric.ATTRIBUTE_IS_MARK,\n tree_opts)\n return self.__marks_cache", "def test_embedded_observable_marking(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n amber_marking = generate_marking_spec(generate_amber_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n observable = generate_observable()\n indicator.add_observable(observable)\n \n container.add_marking(indicator, red_marking, descendants=True)\n container.add_global(amber_marking)\n \n self.assertTrue(container.is_marked(observable, red_marking))\n self.assertTrue(container.is_marked(observable, amber_marking))", "def test_check_consistency1():\n\n roi = 
ROI()\n\n labels = ['a', 'b', 'c']\n lrs = ['l', 'r', 'l']\n\n roi.set_labels(labels, lrs)\n\n roi.check_consistency()\n\n roi.labels.pop()\n\n with raises(InconsistentDataError):\n assert roi.check_consistency()", "def __init__(self, marker, marker_set):\n assert isinstance(marker, list)\n assert len(marker) > 0\n assert all([len(x) == len(marker[0]) for x in marker[1:]])\n assert all([all(x in marker_set for x in row) for row in marker])\n assert all([x == \"*\" or x == \".\" or x == \"#\" for x in marker_set])\n self._marker, self._marker_set = marker, marker_set", "def __init__(self, marker, marker_set):\n assert isinstance(marker, list)\n assert len(marker) > 0\n assert all([len(x) == len(marker[0]) for x in marker[1:]])\n assert all([all(x in marker_set for x in row) for row in marker])\n assert all([x == \"*\" or x == \".\" or x == \"#\" for x in marker_set])\n self._marker, self._marker_set = marker, marker_set", "def __init__(self, marker, marker_set):\n assert isinstance(marker, list)\n assert len(marker) > 0\n assert all([len(x) == len(marker[0]) for x in marker[1:]])\n assert all([all(x in marker_set for x in row) for row in marker])\n assert all([x == \"*\" or x == \".\" or x == \"#\" for x in marker_set])\n self._marker, self._marker_set = marker, marker_set", "def __init__(self, marker, marker_set):\n assert isinstance(marker, list)\n assert len(marker) > 0\n assert all([len(x) == len(marker[0]) for x in marker[1:]])\n assert all([all(x in marker_set for x in row) for row in marker])\n assert all([x == \"*\" or x == \".\" or x == \"#\" for x in marker_set])\n self._marker, self._marker_set = marker, marker_set", "def check_prt_and_commits(self, prt, commits, marks):\n self._current_prt = prt\n self.fast_export_marks = marks\n self.fast_export_marks.set_head(prt.ref)\n self.check_p4gf_user_write_permission()\n self.check_commits(commits)", "def _assert_ks_postformat(self):\n if type(self.idxs) in [list, np.ndarray]:\n if self.ks is None:\n if self.staticneighs:\n pass\n else:\n self.ks = range(len(self.idxs))\n if self.staticneighs:\n pass\n else:\n# print self.ks, self.idxs, self.set_neighs, self.set_sp_rel_pos\n assert(len(self.ks) == len(self.idxs))\n ## Defining functions\n if self.sp_relative_pos is not None and self.staticneighs:\n self.get_sp_rel_pos = self._static_get_rel_pos\n elif not self.staticneighs:\n if type(self.sp_relative_pos) == list:\n self.get_sp_rel_pos = self._dynamic_rel_pos_list\n else:\n self.get_sp_rel_pos = self._dynamic_rel_pos_array\n if self.sp_relative_pos is None:\n self.set_sp_rel_pos = self._null_set_rel_pos\n self.get_sp_rel_pos = self._null_get_rel_pos\n ## Ensure correct k_ret\n if np.max(self.ks) > self._kret:\n self._kret = np.max(self.ks)", "def set_mark(self, perception: Perception) -> None:\n if self.mark.set_mark_using_condition(self.condition, perception):\n self.ee = False", "def mark_mines(self, cells):\r\n for cell in cells:\r\n row, col = cell\r\n self.mine_field[row][col] = 'x'\r\n self.mines_left -= 1\r\n return", "def check_glyph_name_in_glyph_set(self, *names):\n if self.glyphNames_:\n for name in names:\n if name in self.glyphNames_:\n continue\n if name not in self.missing:\n self.missing[name] = self.cur_token_location_", "def checks(self, error_margin=0.1):\n\n # Check all compartments are positive\n for label in self.labels:\n assert self.compartments[label] >= 0.", "def test_badyvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {1, 2, 3}, 3)\n 
self.assertEqual(str(e.exception), 'y must be an integer')", "def test_global_marking_on_observables(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n container.add_global(red_marking)\n \n observable = generate_observable()\n package.add_observable(observable)\n \n self.assertTrue(container.is_marked(observable, red_marking))\n self.assertFalse(container.is_marked(observable, MarkingSpecification()))", "def test_remove_parent_marking_for_observable_failure(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n observable = generate_observable()\n indicator.add_observable(observable)\n \n container.add_marking(indicator, red_marking, descendants=True)\n\n self.assertTrue(container.is_marked(observable, red_marking))\n self.assertTrue(container.is_marked(indicator, red_marking))\n self.assertRaises(errors.MarkingRemovalError, container.remove_marking, observable, red_marking)", "def draw_marks(image, marks, color=(255, 255, 255)):\r\n for mark in marks:\r\n cv2.circle(image, (int(mark[0]), int(\r\n mark[1])), 2, color, -1, cv2.LINE_AA)", "def draw_marks(image, marks, color=(255, 255, 255)):\r\n for mark in marks:\r\n cv2.circle(image, (int(mark[0]), int(\r\n mark[1])), 1, color, -1, cv2.LINE_AA)", "def validate(self):\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.parent.fitmodel = self.model\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def test_kyc_put_legal(self):\n pass", "def test_saved_sgrid_attributes(self):\n u1_var = self.target.U1\n u1_var_center_avg_axis = u1_var.center_axis\n expected_u1_center_axis = 0\n u1_vector_axis = u1_var.vector_axis\n expected_u1_vector_axis = 'X'\n original_angles = self.sg_obj.angles\n saved_angles = self.target.angles\n self.assertEqual(u1_var_center_avg_axis, expected_u1_center_axis)\n self.assertEqual(u1_vector_axis, expected_u1_vector_axis)\n np.testing.assert_almost_equal(original_angles, saved_angles, decimal=3)", "def test_remove_parent_marking_failure(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n \n indicator = Indicator(title=\"Test\")\n incident.related_indicators.append(indicator)\n \n container.add_marking(incident, red_marking, descendants=True)\n self.assertTrue(container.is_marked(incident, red_marking))\n self.assertTrue(container.is_marked(indicator, red_marking))\n self.assertRaises(errors.MarkingRemovalError, container.remove_marking, indicator, red_marking, True)", "def dump(self, mark):", "def validate(self):\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def validate(self):\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def mark_safe(self, cell):\n self.safes.add(cell)\n for sentence in self.knowledge:\n sentence.mark_safe(cell)", "def mark_safe(self, cell):\n self.safes.add(cell)\n for sentence in self.knowledge:\n sentence.mark_safe(cell)", "def 
mark_safe(self, cell):\n self.safes.add(cell)\n for sentence in self.knowledge:\n sentence.mark_safe(cell)", "def mark_safe(self, cell):\n self.safes.add(cell)\n for sentence in self.knowledge:\n sentence.mark_safe(cell)", "def mark_safe(self, cell):\n self.safes.add(cell)\n for sentence in self.knowledge:\n sentence.mark_safe(cell)", "def __validate_boundary_notes(self) -> None:\n if not self.cantus_firmus[0].scale_element.is_from_tonic_triad:\n raise ValueError(\n f\"{self.cantus_firmus[0].scale_element.note} is not \"\n f\"a tonic triad member for {self.tonic}-{self.scale_type}; \"\n f\"therefore, cantus firmus can not start with it.\"\n )\n if not self.cantus_firmus[-1].scale_element.is_from_tonic_triad:\n raise ValueError(\n f\"{self.cantus_firmus[-1].scale_element.note} is not \"\n f\"a tonic triad member for {self.tonic}-{self.scale_type}; \"\n f\"therefore, cantus firmus can not end with it.\"\n )\n if not self.counterpoint[0].scale_element.is_from_tonic_triad:\n raise ValueError(\n f\"{self.counterpoint[0].scale_element.note} is not \"\n f\"a tonic triad member for {self.tonic}-{self.scale_type}; \"\n f\"therefore, counterpoint line can not start with it.\"\n )\n if not self.end_scale_element.is_from_tonic_triad:\n raise ValueError(\n f\"{self.end_scale_element.note} is not \"\n f\"a tonic triad member for {self.tonic}-{self.scale_type}; \"\n f\"therefore, counterpoint line can not end with it.\"\n )\n lowest_position = self.lowest_element.position_in_semitones\n highest_position = self.highest_element.position_in_semitones\n if lowest_position >= highest_position:\n raise ValueError(\n \"Lowest note and highest note are in wrong order: \"\n f\"{self.counterpoint_specifications['lowest_note']} \"\n \"is higher than \"\n f\"{self.counterpoint_specifications['highest_note']}.\"\n )", "def test_case_assumptions(self):\n self.assertTrue(\n self.existing_map_id in self.example_map.feedline_map_lookup,\n msg=f'Expects {self.existing_map_id} to be in example map keys ({list(self.example_map.feedline_map_lookup.keys())}).'\n )\n feedline_map: FeedlineMap = self.example_map.feedline_map_lookup[self.existing_map_id]\n self.assertTrue(\n self.existing_feedline_nr in feedline_map.bitmap_lookup,\n msg=f'Expects {self.existing_feedline_nr} to be in example map keys ({list(feedline_map.bitmap_lookup.keys())}).'\n )\n self.assertFalse(\n self.not_existing_map_id in self.example_map.feedline_map_lookup,\n msg=f'Expects {self.existing_map_id} NOT to be in example map keys ({list(self.example_map.feedline_map_lookup.keys())}).'\n )\n self.assertFalse(\n self.not_existing_feedline_nr in feedline_map.bitmap_lookup,\n msg=f'Expects {self.not_existing_feedline_nr} NOT to be in example map keys ({list(feedline_map.bitmap_lookup.keys())}).'\n )", "def test_DistanceMatrices_setter_wrong_number(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',\r\n [self.overview_dm])\r\n self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',\r\n [self.overview_dm, self.overview_dm, self.overview_dm])", "def mark_sq8(self):\n self.drive_inches(15, 400)\n self.turn_degrees(90, 400)\n self.drive_inches(13, 400)\n ev3.Sound.speak('Place Mark').wait()\n self.arm_calibration()\n self.drive_inches(-13, 400)\n self.turn_degrees(-90, 400)\n self.drive_inches(-15, 400)", "def reset_annotations(self):\n # FIXME: this state does not make sense\n self.annotation_date_set = False\n self.annotation_comment_set = False\n self.annotation_type_set = False\n self.annotation_spdx_id_set = False", "def 
test_problem_marked_correct(self):\r\n\r\n self.context['status'] = Status('correct')\r\n self.context['input_type'] = 'checkbox'\r\n self.context['value'] = self.VALUE_DICT\r\n\r\n # Should mark the entire problem correct\r\n xml = self.render_to_xml(self.context)\r\n xpath = \"//div[@class='indicator_container']/span[@class='status correct']\"\r\n self.assert_has_xpath(xml, xpath, self.context)\r\n\r\n # Should NOT mark individual options\r\n self.assert_no_xpath(xml, \"//label[@class='choicetextgroup_incorrect']\",\r\n self.context)\r\n\r\n self.assert_no_xpath(xml, \"//label[@class='choicetextgroup_correct']\",\r\n self.context)", "def check(self):\n self.initial_scatter = ax.scatter(self.skel_points[:, 0],\n self.skel_points[:, 1],\n self.skel_points[:, 2], s=10, c='r')\n self.cell_points = self.get_cell_xyz()\n ax.scatter(self.cell_points[::50, 0],\n self.cell_points[::50, 1],\n self.cell_points[::50, 2], s=3, c='b', alpha=.1)\n ax.set_xlabel('X (um)')\n ax.set_ylabel('Y (um)')\n ax.set_zlabel('Z (um)')", "def test_DistanceMatrices_setter_wrong_number(self):\r\n self.assertRaises(ValueError, setattr, self.overview_mantel,\r\n 'DistanceMatrices', [self.overview_dm])\r\n self.assertRaises(ValueError, setattr, self.overview_mantel,\r\n 'DistanceMatrices', [self.overview_dm, self.overview_dm,\r\n self.overview_dm])", "def test_embedded_observable_marking_removal(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n observable = generate_observable()\n indicator.add_observable(observable)\n \n container.add_marking(observable, red_marking)\n self.assertTrue(container.is_marked(observable, red_marking))\n\n container.remove_marking(observable, red_marking)\n self.assertFalse(container.is_marked(observable, red_marking))", "def __check_input__(self):\n # | - __check_input__\n tmp = set(self.tree_level_labels)\n input_diff = tmp.symmetric_difference(self.level_entries.keys())\n if not input_diff == set():\n undefined_labels = []\n for i in input_diff:\n undefined_labels.append(i)\n\n print(\"\\n\")\n message = \"Did not fill out level entries dict properly\" + \"\\n\"\n message += \"The following properties need to be defined\" + \"\\n\"\n message += str(undefined_labels)\n raise ValueError(message)\n # __|", "def move(self, coord, mark):\n self.arr[coord] = mark", "def test_MetadataMap_setter_invalid_input(self):\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n \"foo\")\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n [])\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n {})\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n None)\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n self.overview_dm)", "def mark(self, markName, markTime):\n if markName not in self._marks:\n self._marks[markName] = markTime", "def set_move(self, position: Point, mark: Mark) -> None:\n\t\tif mark == Mark.X:\n\t\t\tself.tiles[position.x][position.y] = 1\n\t\telse:\n\t\t\tself.tiles[position.x][position.y] = -1", "def test_embedded_marking_removal(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n \n indicator = Indicator(title=\"Test\")\n 
incident.related_indicators.append(indicator)\n \n container.add_marking(indicator, red_marking)\n self.assertTrue(container.is_marked(indicator, red_marking))\n\n container.remove_marking(indicator, red_marking)\n self.assertFalse(container.is_marked(indicator, red_marking))", "def diff_marks(font_before, font_after, marks_before, marks_after,\n name=None, thresh=4, scale_upms=True):\n upm_before = font_before.ttfont['head'].unitsPerEm\n upm_after = font_after.ttfont['head'].unitsPerEm\n\n charset_before = set([font_before.glyph(g).key for g in font_before.glyphset])\n charset_after = set([font_after.glyph(g).key for g in font_after.glyphset])\n\n marks_before_h = {i['base_glyph'].key+i['mark_glyph'].key: i for i in marks_before\n if i['base_glyph'].key in charset_after and i['mark_glyph'].key in charset_after}\n marks_after_h = {i['base_glyph'].key+i['mark_glyph'].key: i for i in marks_after\n if i['base_glyph'].key in charset_before and i['mark_glyph'].key in charset_before}\n\n missing = _subtract_items(marks_before_h, marks_after_h)\n new = _subtract_items(marks_after_h, marks_before_h)\n modified = _modified_marks(marks_before_h, marks_after_h, thresh,\n upm_before, upm_after, scale_upms=True)\n\n new = DiffTable(name + \"_new\", font_before, font_after, data=new, renderable=True)\n new.report_columns([\"base_glyph\", \"base_x\", \"base_y\",\n \"mark_glyph\", \"mark_x\", \"mark_y\"])\n new.sort(key=lambda k: abs(k[\"base_x\"]) - abs(k[\"mark_x\"]) + \\\n abs(k[\"base_y\"]) - abs(k[\"mark_y\"]))\n\n missing = DiffTable(name + \"_missing\", font_before, font_after, data=missing,\n renderable=True)\n missing.report_columns([\"base_glyph\", \"base_x\", \"base_y\",\n \"mark_glyph\", \"mark_x\", \"mark_y\"])\n missing.sort(key=lambda k: abs(k[\"base_x\"]) - abs(k[\"mark_x\"]) + \\\n abs(k[\"base_y\"]) - abs(k[\"mark_y\"]))\n modified = DiffTable(name + \"_modified\", font_before, font_after, data=modified,\n renderable=True)\n modified.report_columns([\"base_glyph\", \"mark_glyph\", \"diff_x\", \"diff_y\"])\n modified.sort(key=lambda k: abs(k[\"diff_x\"]) + abs(k[\"diff_y\"]), reverse=True)\n return {\n \"new\": new,\n \"missing\": missing,\n \"modified\": modified,\n }", "def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! 
Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)", "def test_embedded_component_marking(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n amber_marking = generate_marking_spec(generate_amber_marking_struct())\n \n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n \n indicator = Indicator(title=\"Test\")\n incident.related_indicators.append(indicator)\n \n container.add_marking(incident, red_marking, descendants=True)\n container.add_global(amber_marking)\n \n self.assertTrue(container.is_marked(indicator, red_marking))\n self.assertTrue(container.is_marked(indicator, amber_marking))", "def check_attr(self):\n super(Scatter, self).check_attr()", "def _handleMarkernoChangedDelete(self):\n \n # Get previous markerno\n # update markerno's >prev_markerno to markerno + 1\n # update of_places set markerno = markerno + 1 where territoryno = '4-1-2' and markerno is not null\n x=0\n pass", "def mark_sq1(self):\n self.drive_inches(6.5, 400)\n ev3.Sound.speak('Place Mark').wait()\n self.arm_calibration()\n self.drive_inches(-6.5, 400)", "def test_DistanceMatrices_setter_wrong_number(self):\r\n self.assertRaises(ValueError, setattr, self.pm,\r\n 'DistanceMatrices', [self.overview_dm])\r\n self.assertRaises(ValueError, setattr, self.pm,\r\n 'DistanceMatrices', [self.overview_dm, self.overview_dm])", "def __do_essential_memebers_exist__(self):\n assert self.element_type is not None\n assert self.elements is not None\n assert self.points is not None", "def test_badxvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {1, 2, 3}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def test__markMarkdown_textOnly1(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markMarkdown(\"Hallo\")==\"00000\")", "def mark_safe(self, cell):\n \n self.safes.add(cell)\n \n for sentence in self.knowledge:\n sentence.mark_safe(cell)", "def check_win():\r\n for mark in markers:\r\n if loc[0] == mark and loc[1] == mark and loc[2] == mark:\r\n return True\r\n if loc[0] == mark and loc[3] == mark and loc[6] == mark:\r\n return True\r\n if loc[0] == mark and loc[4] == mark and loc[8] == mark:\r\n return True\r\n if loc[1] == mark and loc[4] == mark and loc[7] == mark:\r\n return True\r\n if loc[2] == mark and loc[4] == mark and loc[6] == mark:\r\n return True\r\n if loc[2] == mark and loc[5] == mark and loc[8] == mark:\r\n return True\r\n if loc[3] == mark and loc[4] == mark and loc[5] == mark:\r\n return True\r\n if loc[6] == mark and loc[7] == mark and loc[8] == mark:\r\n return True\r\n else:\r\n return False", "def parse_test_marks(text):\n marks = {}\n for m in TEST_MARKS.finditer(text):\n key, value = m.groups()\n marks[key] = value.strip()\n return marks", "def test__markMarkdown_textOnly3(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markMarkdown(\"Hallo Mama!\")==\"00000000000\")" ]
[ "0.6549789", "0.64396226", "0.6256343", "0.5967002", "0.59457564", "0.58952796", "0.58587354", "0.5848375", "0.58305866", "0.58305866", "0.5808072", "0.57763886", "0.5767603", "0.5664824", "0.5655856", "0.5617224", "0.5596353", "0.55772895", "0.55504787", "0.55380124", "0.5525713", "0.5499428", "0.5494318", "0.54542", "0.5450722", "0.5446742", "0.5399051", "0.5373586", "0.5370118", "0.5342338", "0.53155655", "0.53040147", "0.5294627", "0.5285473", "0.5273788", "0.52641374", "0.5258425", "0.52436155", "0.5232842", "0.52291787", "0.52275354", "0.52180076", "0.5212686", "0.5211591", "0.51849735", "0.51849735", "0.51849735", "0.51849735", "0.5175513", "0.517228", "0.5167495", "0.51564085", "0.5147492", "0.5147078", "0.5134956", "0.5128608", "0.5124205", "0.51240927", "0.51223296", "0.51186776", "0.5091219", "0.5089244", "0.50853103", "0.50739425", "0.5062151", "0.5062151", "0.5059508", "0.5059508", "0.5059508", "0.5059508", "0.5059508", "0.5057802", "0.5041724", "0.50361615", "0.5020689", "0.5009761", "0.5005054", "0.50000167", "0.49990118", "0.49977374", "0.49859655", "0.49814323", "0.49753982", "0.4968799", "0.49687126", "0.49625987", "0.49609554", "0.49604136", "0.49591643", "0.49558812", "0.49552777", "0.495202", "0.494933", "0.49407226", "0.4939516", "0.4935279", "0.49311727", "0.492513", "0.49234", "0.49212343" ]
0.5462163
23
Given string like "test_odm_pcb (tests.wedge100.test_eeprom.EepromTest)" convert to a python test format to use it directly
def format_into_test_path(self, testitem): test_string = testitem.split("(") test_path = test_string[1].split(")")[0] """ Test item has different path with different python version example: on 3.8 set_psu_cmd (tests.fuji.test_psu.Psu1Test) on 3.11 set_psu_cmd (tests.fuji.test_psu.Psu1Test.set_psu_cmd) """ if test_path.split(".")[-1].strip() != test_string[0].strip(): test_path = test_path + "." + test_string[0] return test_path.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(test_name):\n recipe, simple_test_name = test_name.split('.', 1)\n return recipe, simple_test_name", "def test_string():", "def test_parse_string(self):\n bb = parse(antlr4.InputStream(test_file))\n\n assert bb._var == {\"alpha\": 0.3423}\n\n expected = {\"name\": \"fock\", \"options\": {\"num_subsystems\": 1, \"cutoff_dim\": 7, \"shots\": 10}}\n assert bb.target == expected\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [0]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [0]},\n ]\n\n assert bb.operations == expected", "def test_load_object_from_string():\n tests = (\n (\"string.Template\", string.Template),\n (\"os.path.basename\", os.path.basename),\n (\"string.ascii_letters\", string.ascii_letters)\n )\n for test in tests:\n assert load_object_from_string(test[0]) is test[1]", "def test_parser():\n return parser(\"Testing\", \"Use this from a test\", \"\")", "def test__read_scenario_files(self):\n test_str = '<sequence_demo><adaptivenumericinput />'\n test_result = _read_scenario_files()\n self.assertEqual(test_str, test_result[0:len(test_str)])", "def test_convert():", "def flat_test_name(_id):\n return \"-\".join(_id.split(\".\")[1:])", "def parse_gtest_tests(gtest_output_raw: str):\n test_list = []\n current_test_prefix = ''\n gtest_output_split = gtest_output_raw.split('\\n')\n current_index = 0\n # skip to the actual test list\n while current_index < len(gtest_output_split):\n current_string = gtest_output_split[current_index]\n test_matches = re.findall(r'^[a-zA-Z]*\\.$', current_string)\n if len(test_matches) != 0:\n break\n current_index += 1\n while current_index < len(gtest_output_split):\n current_string = gtest_output_split[current_index]\n if len(current_string) == 0:\n current_index += 1\n continue\n # get the test name\n test_match = re.findall(r'^\\s*\\S*', current_string)[0].replace(' ', '')\n if test_match[len(test_match) - 1] == '.':\n # We've found a new prefix\n current_test_prefix = test_match\n current_index += 1\n continue\n test_list.append(current_test_prefix + test_match)\n current_index += 1\n return test_list", "def test_string():\n pass", "def test():\n LowerCaseStr().from_python('ABC')", "def test_decode():", "def test_0070(self):\n log_v2_0_str = self._read_xml('log_v2_0.xml')\n log_v1_str = c.str_to_v1_str(log_v2_0_str)\n self.assertTrue(c.str_is_v1(log_v1_str))\n self.assertFalse(c.str_is_v2(log_v1_str))", "def test_simple_parse(self):\n pass", "def test_strings(self):\n topo = Topology()\n ud = []\n ud.append(u'⡍⠔⠙⠖ ⡊ ⠙⠕⠝⠰⠞ ⠍⠑⠁⠝ ⠞⠕ ⠎⠁⠹ ⠹⠁⠞ ⡊ ⠅⠝⠪⠂ ⠕⠋ ⠍⠹')\n ud.append(u'2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm')\n ud.append(u'многоязычных')\n ud.append(\"Arsenal hammered 5-1 by Bayern again\")\n s = topo.source(ud, name=u'façade')\n sas = s.as_string()\n sd = s.map(lambda s : {'val': s + u\"_test_it!\"})\n tester = Tester(topo)\n tester.contents(s, ud)\n tester.contents(sas, ud)\n dud = []\n for v in ud:\n dud.append({'val': v + u\"_test_it!\"})\n tester.contents(sd, dud)\n\n tester.test(self.test_ctxtype, self.test_config)", "def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. 
\n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }", "def extract_test_name(base_path):\n name = p.basename(base_path)\n if name == \"test.py\":\n name = \"\"\n elif name.startswith(\"test_\") and name.endswith(\".py\"):\n name = name[len(\"test_\") : (len(name) - len(\".py\"))]\n return name", "def get_TestEntry_instance(string, config):\n paren_i = string.find(\"(\")\n if paren_i > 0:\n args = string[paren_i+1:-1]\n string = string[:paren_i]\n args, kwargs = core.parse_args(args)\n else:\n args = ()\n kwargs = {}\n try:\n cls = module.get_object(string)\n except (module.ModuleImportError, module.ObjectImportError), err:\n logging.warn(err)\n return None\n testinstance = cls(config)\n return core.TestEntry(testinstance, args, kwargs, False)", "def test_name(self):\r\n parts = []\r\n if self.test.__module__ != '__main__':\r\n parts.append(self.test.__module__)\r\n if hasattr(self.test, 'im_class'):\r\n parts.append(self.test.im_class.__name__)\r\n parts.append(self.test.__name__)\r\n return '.'.join(parts)", "def get_instance(string):\n row = string.split(\".\")\n\n # handles \"f.eid\" case\n if len(row) < 4:\n return \"0\"\n\n # the number is somewhat arbitrary... \n # it is determined by Joeri's UK Phenotypes script.\n # (which is \"get_UKphenotypes.r\" --- thanks Joeri!)\n return row[2]", "def test_s2():\n vc = vtec.parse(EX1)\n assert vc[0].s2() == \"TO.W\"", "def nemo(a_string):\n return NemoParser().parse(a_string)", "def test_str(self, string, application):\n assert string == str(application)", "def main():\n\ttest() #test ParseError", "def SplitTestPath(test_result, test_path_format):\n if test_path_format == TELEMETRY_TEST_PATH_FORMAT:\n separator = '/'\n elif test_path_format == GTEST_TEST_PATH_FORMAT:\n separator = '.'\n else:\n raise ValueError('Unknown test path format: %s' % test_path_format)\n\n test_path = test_result['testPath']\n if separator not in test_path:\n raise ValueError('Invalid test path: %s' % test_path)\n\n return test_path.split(separator, 1)", "def test_stringToString(self):\n self.assertNativeString(\"Hello!\", \"Hello!\")", "def import_function(s):\n a = s.split('.')\n j = lambda x: '.'.join(x)\n return getattr(import_module(j(a[:-1])), a[-1])", "def test_tb12_strings():\n\n err = _do_test_raw(\"\"\"\n var f = \"editImageMapButton.label\";\n var x = \"haveSmtp1.suffix2\";\n \"\"\", versions=TB12_DEFINITION)\n assert err.failed()\n assert err.warnings\n assert err.notices\n assert err.compat_summary[\"errors\"]", "def test_nested_msgs(self):\n data = StringIO(NESTED_MSGS_PROTO_SPEC)\n ppp = StringProtoSpecParser(data) # data should be file-like\n str_obj_model = ppp.parse() # object model from string serialization\n self.assertIsNotNone(str_obj_model)\n self.assertTrue(isinstance(str_obj_model, M.ProtoSpec))\n\n self.assertEqual('org.xlattice.zoggery.nm', str_obj_model.name)\n self.assertEqual(1, len(str_obj_model.msgs))\n msg = str_obj_model.msgs[0]\n\n self.assertEqual('nestedMsgs', msg.name)\n enums = msg.enums\n self.assertIsNotNone(enums)\n self.assertEqual(2, len(enums))\n\n foo_enum = enums[0]\n 
bar_enum = enums[1]\n self.assertEqual('Foo', foo_enum.name)\n self.assertEqual('Bar', bar_enum.name)\n\n self.assertEqual(2, len(foo_enum))\n self.assertEqual(3, len(bar_enum))\n\n a_pair = foo_enum[0]\n self.assertEqual('a', a_pair.symbol)\n self.assertEqual(1, a_pair.value)\n\n b_pair = foo_enum[1]\n self.assertEqual('b', b_pair.symbol)\n self.assertEqual(2, b_pair.value)\n\n c_pair = bar_enum[0]\n self.assertEqual('c', c_pair.symbol)\n self.assertEqual(3, c_pair.value)\n\n d_pair = bar_enum[1]\n self.assertEqual('d', d_pair.symbol)\n self.assertEqual(4, d_pair.value)\n\n e_pair = bar_enum[2]\n self.assertEqual('e', e_pair.symbol)\n self.assertEqual(5, e_pair.value)\n\n self.round_trip_poto_spec_via_string(str_obj_model)", "def testStringRepresentation(self):\n strRep = str(self.msTest)\n expectedStrRep = \"Computer algebra systems and commands:\\n\\\n Magma : magma\\n\\\n Maple : maple\\n\\\n Singular : Singular\\n\\\nTime Command: time -p\"\n self.assertEqual(strRep,expectedStrRep,\n \"Something was wrong with the string representation of\\\nan instance of the class MachineSettings\")", "def test_str(self):\n # Continuous ROMs\n model = roi._core.InferredContinuousROM(\"A\")\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = Ax(t)\"\n model.modelform = \"cA\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = c + Ax(t)\"\n model.modelform = \"HB\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = H(x(t) ⊗ x(t)) + Bu(t)\"\n model.modelform = \"G\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = G(x(t) ⊗ x(t) ⊗ x(t))\"\n model.modelform = \"cH\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = c + H(x(t) ⊗ x(t))\"\n\n # Discrete ROMs\n model = roi._core.IntrusiveDiscreteROM(\"A\")\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = Ax_{j}\"\n model.modelform = \"cB\"\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = c + Bu_{j}\"\n model.modelform = \"H\"\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = H(x_{j} ⊗ x_{j})\"", "def FromLine(cls, line):\n try:\n attrs, line = line.split('|', 1)\n\n if attrs.strip() != 'flaky':\n return None\n\n line = line.strip()\n flaky = True\n except ValueError:\n flaky = False\n\n fields = cls.LINE_RE.match(line.strip())\n\n if fields:\n result, path, variant = fields.groups()\n\n # some of the tests are generated in build dir and are issued from there,\n # because every test run is performed in randomly named tmp directory we\n # need to remove random part\n try:\n # assume that 2nd field is a test path\n path_parts = path.split('/')\n\n index = path_parts.index('testsuite')\n path = '/'.join(path_parts[index + 1:])\n except ValueError:\n path = '/'.join(path_parts)\n\n # Remove junk from test description.\n variant = variant.strip(', ')\n\n substitutions = [\n # remove include paths - they contain name of tmp directory\n ('-I\\S+', ''),\n # compress white spaces\n ('\\s+', ' ')\n ]\n\n for pattern, replacement in substitutions:\n variant = re.sub(pattern, replacement, variant)\n\n # Some tests separate last component of path by space, so actual filename\n # ends up in description instead of path part. 
Correct that.\n try:\n first, rest = variant.split(' ', 1)\n except ValueError:\n pass\n else:\n if first.endswith('.o'):\n path = os.path.join(path, first)\n variant = rest\n\n # DejaGNU framework errors don't contain path part at all, so description\n # part has to be reconstructed.\n if not any(os.path.basename(path).endswith('.%s' % suffix)\n for suffix in ['h', 'c', 'C', 'S', 'H', 'cc', 'i', 'o']):\n variant = '%s %s' % (path, variant)\n path = ''\n\n # Some tests are picked up from current directory (presumably DejaGNU\n # generates some test files). Remove the prefix for these files.\n if path.startswith('./'):\n path = path[2:]\n\n return cls(path, variant or '', result, flaky=flaky)", "def test_from_string(self):\n from pystarlab.starlab import Story\n king_output = \"king.out\"\n\n king_path = os.path.join(DATA_DIR, king_output)\n with open(king_path, 'r') as f:\n king_str = f.read()\n king_story = Story.from_string(king_str)\n self.assertEquals(king_str, str(king_story))", "def create_test_file_name(test_file):\n 'test.{}'.format(test_file.replace('.py', ''))", "def test_str(self):\n name = str(self.engine)\n self.assertTrue(name.startswith('test_database_'))\n self.assertTrue(name.endswith('.test_docs'))", "def test_explicit_delimiter_specification(self):\n transformer = SplitTransformer()\n transformer.setup(\"test\", {\n \"dateformat\" : \"%Y-%m-%d %H:%M:%S\",\n \"delimiter\" : \"\\.-\\.\",\n \"group_order\" : \"HOST_NAME HOST_ADDRESS PRIORITY FACILITY TIME DATE MESSAGE\" \n })\n teststring = \"test_host.-.42.2.53.52.-.5.-.4.-.11:00:24.-.2012-12-10.-.Testmessage\"\n event = transformer.transform(teststring)\n assert event != None\n assert event[\"host_name\"] == \"test_host\"\n assert event[\"host_address\"] == IPAddress(\"42.2.53.52\")\n assert event[\"priority\"] == \"5\"\n assert event[\"facility\"] == \"4\"\n assert event[\"message\"] == \"Testmessage\"", "def test_0050(self):\n logEntry_v1_0_str = self._read_xml('logEntry_v1_0.xml')\n logEntry_v2_str = c.str_to_v2_str(logEntry_v1_0_str)\n self.assertTrue(c.str_is_v2(logEntry_v2_str))\n self.assertFalse(c.str_is_v1(logEntry_v2_str))", "def test_excalibur_name():\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"", "def testEventFromString(self):\r\n\r\n self._log.debug( \"\\ntestEventFromString\" )\r\n \r\n evtStr = \"2008-10-26 18:18:24,184 http://id.webbrick.co.uk/events/webbrick/CT,webbrick/9/CT/3,{'srcChannel': 3, 'curhi': 100.0, 'val': 19.600000000000001, 'fromNode': 9, 'curlo': -50.0, 'defhi': 100.0, 'deflo': -50.0}\"\r\n\r\n evt = EventFromString(evtStr)\r\n self._log.debug( \"type %s source %s payload %s\", evt.getType(), evt.getSource(), evt.getPayload() )\r\n self.assertEqual( evt.getType(),\"http://id.webbrick.co.uk/events/webbrick/CT\" )\r\n self.assertEqual( evt.getSource(),\"webbrick/9/CT/3\" )\r\n self.assertNotEqual( evt.getPayload(),None )\r\n od = evt.getPayload()\r\n self.assertEqual( od[\"srcChannel\"], '3')\r\n self.assertEqual( od[\"val\"], '19.600000000000001')", "def test_strings_with_foo(self):\n write this test!", "def test_probabilistic_parsers():", "def test_basic_parsers():", "def test_killer_parser():\n sentence = \"Salut GrandPy ! 
Est-ce que tu connais l'adresse d'OpenClassrooms ?\"\n address = \"7 Cité Paradis, 75010 Paris\"\n kp = KillerParser()\n test_sentence = kp.sentence_parser(sentence)\n assert test_sentence == \"openclassrooms\"\n test_address = kp.address_parser(address)\n assert test_address == \"cité paradis paris\"", "def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):\n if strip_tags:\n tags_start = name.find('[')\n tags_end = name.find(']')\n if tags_start > 0 and tags_end > tags_start:\n newname = name[:tags_start]\n newname += name[tags_end + 1:]\n name = newname\n\n if strip_scenarios:\n tags_start = name.find('(')\n tags_end = name.find(')')\n if tags_start > 0 and tags_end > tags_start:\n newname = name[:tags_start]\n newname += name[tags_end + 1:]\n name = newname\n\n return name", "def _read_spec_test(filename):\n with open(filename, \"rt\") as file:\n source = file.read()\n return ParsedSpecTest.parse(source)", "def _generate_test_name(source):\n out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()\n return \"test_%s\" % out", "def parseString(self, val):\n \n if not isinstance(val, str):\n raise Exception('Input must be a string!')\n if len(val) < 9:\n raise Exception( 'ESDT Names must be 9 characters!' )\n self.setType( val[:2] )\n self.setTime( val[2] )\n self.setFrequency( val[3] )\n self.setHRes( val[4] )\n self.setVRes( val[5] )\n self.setGroup( val[6:9] )\n tmp = val.split('.')\n if len(tmp) == 4:\n self.setVersion( *tmp[1:] )", "def test_smoke():\n raise SkipTest\n parse('[8]')\n parse('[show \"hey\"]')\n parse('[frob thing with thong]')\n parse('[\"this\" \"thing\"]')\n parse('[[] []]')\n parse('[key: value key2: value2 orphan]')\n parse('[1 + (2 + 3)]')\n parse('[funcs: [term/on-red 8 \"foo\"]]')", "def build_test(base_url, node, input_test = None):\n\n mytest = input_test\n if not mytest:\n mytest = Test()\n\n node = lowercase_keys(flatten_dictionaries(node)) #Clean up for easy parsing\n\n #Copy/convert input elements into appropriate form for a test object\n for configelement, configvalue in node.items():\n #Configure test using configuration elements\n if configelement == u'url':\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.url = base_url + unicode(configvalue,'UTF-8').encode('ascii','ignore')\n elif configelement == u'method': #Http method, converted to uppercase string\n var = unicode(configvalue,'UTF-8').upper()\n assert var in HTTP_METHODS\n mytest.method = var\n elif configelement == u'group': #Test group\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.group = unicode(configvalue,'UTF-8')\n elif configelement == u'name': #Test name\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.name = unicode(configvalue,'UTF-8')\n elif configelement == u'validators':\n #TODO implement more validators: regex, file/schema match, etc\n if isinstance(configvalue, list):\n for var in configvalue:\n myquery = var.get(u'query')\n myoperator = var.get(u'operator')\n myexpected = var.get(u'expected')\n myexportas = var.get(u'export_as')\n\n # NOTE structure is checked by use of validator, do not verify attributes here\n # create validator and add to list of validators\n if mytest.validators is None:\n mytest.validators = list()\n validator = Validator()\n validator.query = myquery\n validator.expected = myexpected\n validator.operator = myoperator if myoperator is not 
None else validator.operator\n validator.export_as = myexportas if myexportas is not None else validator.export_as\n mytest.validators.append(validator)\n else:\n raise Exception('Misconfigured validator, requires type property')\n elif configelement == u'body': #Read request body, either as inline input or from file\n #Body is either {'file':'myFilePath'} or inline string with file contents\n if isinstance(configvalue, dict) and u'file' in lowercase_keys(configvalue):\n var = lowercase_keys(configvalue)\n assert isinstance(var[u'file'],str) or isinstance(var[u'file'],unicode)\n mytest.body = os.path.expandvars(read_file(var[u'file'])) #TODO change me to pass in a file handle, rather than reading all bodies into RAM\n elif isinstance(configvalue, str):\n mytest.body = configvalue\n else:\n # TODO add ability to handle input of directories or file lists with wildcards to test against multiple bodies\n raise Exception('Illegal input to HTTP request body: must be string or map of file -> path')\n\n elif configelement == 'headers': #HTTP headers to use, flattened to a single string-string dictionary\n mytest.headers = flatten_dictionaries(configvalue)\n elif configelement == 'expected_status': #List of accepted HTTP response codes, as integers\n expected = list()\n #If item is a single item, convert to integer and make a list of 1\n #Otherwise, assume item is a list and convert to a list of integers\n if isinstance(configvalue,list):\n for item in configvalue:\n expected.append(int(item))\n else:\n expected.append(int(configvalue))\n mytest.expected_status = expected\n elif configelement == 'stop_on_failure':\n mytest.stop_on_failure = safe_to_bool(configvalue)\n\n #Next, we adjust defaults to be reasonable, if the user does not specify them\n\n #For non-GET requests, accept additional response codes indicating success\n # (but only if not expected statuses are not explicitly specified)\n # this is per HTTP spec: http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.5\n if 'expected_status' not in node.keys():\n if mytest.method == 'POST':\n mytest.expected_status = [200,201,204]\n elif mytest.method == 'PUT':\n mytest.expected_status = [200,201,204]\n elif mytest.method == 'DELETE':\n mytest.expected_status = [200,202,204]\n\n return mytest", "def _testcase_name(testcase):\n name = os.path.splitext(os.path.basename(testcase))[0]\n name = name.replace('-', '_')\n name = 'test_{name}'.format(name=name)\n\n assert name.isidentifier()\n\n return name", "def get_java_name(test):\n\n basename = os.path.basename(test)\n\n # note that this is fragile, but we're being conservative here\n name = basename.split('.')[0]\n\n # build the numeral-number table\n nums = {'0' : \"Zero\",\n '1' : \"One\",\n '2' : \"Two\",\n '3' : \"Three\",\n '4' : \"Four\",\n '5' : \"Five\",\n '6' : \"Six\",\n '7' : \"Seven\",\n '8' : \"Eight\",\n '9' : \"Nine\"}\n\n # do our replacement\n for k, v in nums.items():\n name = name.replace(k, v)\n\n # do the stripping split to obtain our fragments\n undesired_chars = '[' + string.whitespace + string.punctuation + ']'\n fragments = re.split(undesired_chars, name)\n\n # capitalize each fragment\n fragments = [f.capitalize() for f in fragments]\n\n # check the first and last fragments\n if fragments[0] != 'Webkit':\n fragments.insert(0, 'Webkit')\n\n if fragments[-1] != 'Test':\n fragments.append('Test')\n\n # join the results\n return ''.join(fragments)", "def parse_test_id(response):\n\n\n start = response.find('>test')\n start += 6\n end = response.find('log<') - 1\n 
test_number = int(response[start:end])\n return test_number", "def add_test(self,test):\n l = test.id.split('.')\n s_obj = self\n while len(l) > 0:\n s_name = l.pop(0)\n if len(l) > 0:\n if s_name in s_obj.suites:\n s_obj = s_obj.suites[s_name]\n else:\n new_suite = Suite(s_name,parent=s_obj)\n s_obj.suites[s_name] = new_suite\n s_obj = new_suite\n s_obj.tests.append(test)", "def test_get_filename_with_new_ext(self):\r\n test_paths = [('/from/root/test.xxx', 'test.yyy'),\r\n ('../relative/path/test.xxx', 'test.yyy'),\r\n ('/double/extension/in/filename/test.zzz.xxx',\r\n 'test.zzz.yyy')]\r\n\r\n for input, exp_output in test_paths:\r\n exp_output = join(self.output_dir, exp_output)\r\n\r\n self.assertEquals(\r\n get_filename_with_new_ext(input, '.yyy', self.output_dir),\r\n exp_output)", "def proto_test(test):\n if isinstance(test, ProtoTest):\n return test\n else:\n return ProtoTest(test)", "def test_import_string(self):\n assert utils.import_string('ttgn.pokedex.utils') == utils", "def test_parse_devide(self):\n self.assertEqual(parse_input.parse([\"8\", \"/\", \"4\"]), 2)", "def test_convert_noun():\n result = convert(\"noun\")\n assert result == \"ounnay\"", "def __init__(self, version_string: str):\n self.components = version_string.split(\".\")", "def __init__(self, version_string: str):\n self.components = version_string.split(\".\")", "def test_0090(self):\n node_list_v2_0_str = self._read_xml('nodeList_v2_0.xml')\n node_list_v1_str = c.str_to_v1_str(node_list_v2_0_str)\n self.assertTrue(c.str_is_v1(node_list_v1_str))\n self.assertFalse(c.str_is_v2(node_list_v1_str))", "def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"", "def test_str(self):\n # Continuous ROMs\n model = roi._core.InterpolatedInferredContinuousROM(\"A\")\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = Ax(t)\"\n model.c_ = lambda t: t\n model.A_ = lambda t: t\n model.modelform = \"cA\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = c(µ) + A(µ)x(t)\"\n model.Hc_ = None\n model.Gc_ = lambda t: t\n model.B_ = None\n model.modelform = \"HB\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = H(x(t) ⊗ x(t)) + Bu(t)\"\n model.modelform = \"G\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = G(µ)(x(t) ⊗ x(t) ⊗ x(t))\"\n\n # Discrete ROMs\n model = roi._core.AffineIntrusiveDiscreteROM(\"cH\")\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = c + H(x_{j} ⊗ x_{j})\"\n model.c_ = lambda t: t\n model.Hc_ = None\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = c(µ) + H(x_{j} ⊗ x_{j})\"", "def parse(s):\n return s", "def test_get_result_string_1(self):\n attr_list = [\"type\", \"phage_id\", \"eval_mode\"]\n string = import_genome.get_result_string(self.tkt, attr_list)\n exp = \"type: replace, phage_id: Trixie, eval_mode: final\"\n self.assertEqual(string, exp)", "def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return 
X_train, Y_train, X_test", "def run_tests():\r\n p = Parser(b\"0About prices\\tPrices/aboutus\\tserver.example.com\\t70\\r\\n\".decode(\"ascii\"))\r\n print(p._parse_dir())", "def test_str(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(str(ap), 'APbbbb.bbbb.bbbb')\n self.assertEqual(str(ap), ap.name)", "def test_get_service_string(self):\n pass", "def parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def test_parse_version():\n version = VersionUtils.parse_version('9.5.3')\n assert version == VersionInfo(9, 5, 3)\n\n # Test #.# style versions\n v10_2 = VersionUtils.parse_version('10.2')\n assert v10_2 == VersionInfo(10, 2, 0)\n\n v11 = VersionUtils.parse_version('11')\n assert v11 == VersionInfo(11, 0, 0)\n\n # Test #beta# style versions\n beta11 = VersionUtils.parse_version('11beta3')\n assert beta11 == VersionInfo(11, 0, 0, prerelease='beta.3')\n\n assert v10_2 < beta11\n assert v11 > beta11\n\n # Test #rc# style versions\n version = VersionUtils.parse_version('11rc1')\n assert version == VersionInfo(11, 0, 0, prerelease='rc.1')\n\n # Test #nightly# style versions\n version = VersionUtils.parse_version('11nightly3')\n assert version == VersionInfo(11, 0, 0, 'nightly.3')\n\n v12_3_tde = VersionUtils.parse_version('12.3_TDE_1.0')\n assert v12_3_tde == VersionInfo(12, 3, 0)", "def save_test(self,test_id):\n l = test_id.split('.')\n if len(l) > 1:\n self.suites[l[:1][0]].save_test('.'.join(l[1:]))\n else:\n suite_id = self.get_id()\n if suite_id:\n test_id = '.'.join((suite_id,test_id))\n test = [t for t in self.tests if t.id == test_id]\n if len(test) >= 0:\n test = test[0]\n else:\n raise Exception(\"Unknown test '%s'\" % test_id)\n testfile = test.id.split('.')[-1:][0]+'.fbt'\n try:\n f = open(os.path.join(self.path,testfile),'w')\n f.write(test.as_expression())\n f.write('\\n')\n finally:\n f.close()", "def test_parse(test, result, capsys):\n print(calc.parse(test))\n out, err = capsys.readouterr()\n print(err)\n assert out == result", "def test_0080(self):\n node_v2_0_str = self._read_xml('node_v2_0.xml')\n node_v1_str = c.str_to_v1_str(node_v2_0_str)\n self.assertTrue(c.str_is_v1(node_v1_str))\n self.assertFalse(c.str_is_v2(node_v1_str))", "def create_from_string(root_str):\n\n root_dict = root_str.replace(' ', '').replace(',next:None', '').replace(\n 'val:', '\"val\":').replace('next:', '\"next\":')\n\n data = json.loads(root_dict)\n\n return 
create_from_dict(data)", "def test_decode_token():\n pass", "def from_str(cls, string):", "def get_test(test_file):\n with open(\"ham_spam_testing\", \"r\", encoding=\"latin-1\") as file:\n test = file.read()\n test = test.lower()\n test = test.split(\"#*#*# \")\n test = test[1:] # Remove first entry, which is empty\n return test", "def test_binarytree_str_as_expected():\n input = (13, 42, 7)\n expected = 'BinaryTree | Root: 13'\n s = BinaryTree(input)\n actual = str(s)\n assert expected == actual", "def test_value_from_documentation(self):\n raw = [\n 0x4B,\n 0x4E,\n 0x58,\n 0x20,\n 0x69,\n 0x73,\n 0x20,\n 0x4F,\n 0x4B,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n string = \"KNX is OK\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def test_conversion_from_dict():\n model_definition = {\n 'language': {'type': 'fixed', 'default': 'english'},\n 'a': {'type': 'fixed', 'persisted': True},\n 'b.c': {'type': 'fixed', 'persisted': True},\n 'b.d.e': {'type': 'text', 'persisted': True},\n 'b.d.f': {'type': 'numeric', 'persisted': True}\n }\n factory = ProductModelFactory(model_definition)\n stemmed = text.parse_text_to_stems('english', 'a value that should be stemmed')\n model_dict = {\n 'a': 'test',\n 'b': {\n 'c': 'foo',\n 'd': {\n 'e': stemmed,\n 'f': 54321\n }\n }\n }\n product = pm.ProductModel.from_dict('test_product', model_dict, factory)\n nose.tools.eq_(product.get_attribute('a'), model_dict['a'], 'Attribute does not match')\n nose.tools.eq_(product.get_attribute('b.c'), model_dict['b']['c'], 'Attribute does not match')\n nose.tools.assert_list_equal(product.get_attribute('b.d.e'),\n model_dict['b']['d']['e'], 'Attribute does not match')\n nose.tools.eq_(product.get_attribute('b.d.f'), model_dict['b']['d']['f'], 'Attribute does not match')", "def TryStripTestId(test_id):\n test_id = test_id.replace('ninja://', '')\n for target in NINJA_TARGET_PREFIXES:\n test_id = test_id.replace(target, '')\n for subtest in TEST_SUITE_PREFIXES:\n test_id = test_id.replace(subtest, '')\n return test_id", "def test_convert_adjective():\n result = convert(\"adjective\")\n assert result == \"adjectiveway\"", "def testA():\n \n cunittest.assert_equals(\"0.814663951\",before_space(\"0.814663951 Euros\"))\n cunittest.assert_equals(\"Euros\",after_space(\"0.814663951 Euros\"))", "def test_get_ext(self):\r\n filename_str = 'http://www.example.com/path/video.mp4'\r\n output = get_ext(filename_str)\r\n self.assertEqual(output, 'mp4')", "def test_parse_version():\n version = parse_version(__version__)\n assert type(version) == Version", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def RemoveAllPrefixesFromGTestName(test):\n test_name_start = max(test.find('.'), 0)\n if test_name_start == 0:\n return test\n\n test_suite = test[:test_name_start]\n test_name = test[test_name_start + 1:]\n\n for prefix in _GTEST_PREFIXES:\n while test_name.startswith(prefix):\n test_name = test_name[len(prefix):]\n\n base_test = '%s.%s' % (test_suite, test_name)\n return base_test", "def test_for_str(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n 'for str s in [\"one\", \"two\"]\\n\\tMeasureFock() | 0'\n )\n assert np.all(\n bb._forvar[\"s\"] == np.array([\"one\", \"two\"])\n )", "def fn2Test(pStrings, s, outputFile):\n with open(outputFile, 'w') as fH:\n fH.write(\" \".join(pStrings) + \" \" + s)\n return s", "def test_car_model_str_representation(car_model):\n assert str(car_model) == 
\"Golf\"", "def run_test(test, fw):\n\n test_path = f\"tests.{test}\"[:-3]\n print(test_path)\n __import__(test_path)\n test_module = sys.modules[test_path]\n analysis_id = test_module.main(fw)\n print(f\"analysis_id = {analysis_id}\")\n return analysis_id", "def test_cli_string():\n cmd = get_cli_string()\n assert \"pytest\" in cmd", "def format_test(version, line, num):\n line = line.split()\n ####line[ 0] = \"../../../source/replace.v\" + str(version) + \".exe\"\n ####line[-3] = \"replace\" + line[-3][2:]\n ####line[-1] = \"replace/outputs/v\" + str(version) + \"/\" + line[-1].split('/')[-1]\n line = [\"../../../source/replace.v\" + str(version) + \".exe\"] + line\n line[-1] = \"replace/inputs/\" + line[-1]\n line.append(\">\")\n line.append(\"replace/outputs/v\" + str(version) + \"/t\" + str(num)) \n return(line)#line[0], ' '.join(line[1:])])", "def testtofloatString ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatStringValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertEqual ( frac1.tofloatString (), expRes )", "def test_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.name == \"testname\"", "def test_process_string():\n decode = StringProcessor()\n assert decode.process_string(\"ab\") == \"\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab*\") == \"b\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab^\") == \"ba\"\n decode.output = \"\"\n\n assert decode.process_string(\"^\") == \"\"", "def test_name00101m1_positive_376(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Notation/name/name00101m/name00101m1.xsd\",\n instance=\"sunData/Notation/name/name00101m/name00101m1_p.xml\",\n class_name=\"A\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_string_conversion(profile_factory):\n profile = profile_factory()\n\n assert str(profile) == profile.name" ]
[ "0.5940089", "0.5811446", "0.5723814", "0.5717782", "0.5511057", "0.5496379", "0.54187816", "0.54119813", "0.52977437", "0.5285956", "0.52357966", "0.52247846", "0.51995105", "0.5177037", "0.51671624", "0.5032216", "0.50164425", "0.500829", "0.50076115", "0.50026107", "0.50000495", "0.49973047", "0.4996995", "0.4994985", "0.4985716", "0.49750087", "0.49674645", "0.49591765", "0.49554506", "0.49422157", "0.4928594", "0.49247533", "0.49217558", "0.4920824", "0.49202818", "0.4903273", "0.48985916", "0.48870564", "0.48868287", "0.48858586", "0.48750016", "0.48707464", "0.4869355", "0.48680842", "0.48618922", "0.48611295", "0.4846511", "0.48462373", "0.48329923", "0.48226365", "0.4821234", "0.481085", "0.48097447", "0.4807568", "0.48004845", "0.47951517", "0.47930053", "0.47929844", "0.4789837", "0.4789837", "0.47873354", "0.47808126", "0.47778144", "0.477671", "0.47752133", "0.4771087", "0.47702098", "0.47605065", "0.47587523", "0.47561342", "0.4754193", "0.47227275", "0.47165272", "0.47040895", "0.47036904", "0.47011876", "0.46997282", "0.4686564", "0.4684547", "0.4683238", "0.4679722", "0.46777493", "0.4677238", "0.4677151", "0.46721995", "0.46656373", "0.4663313", "0.4661856", "0.46593428", "0.4655932", "0.46507987", "0.46414024", "0.46388257", "0.4637436", "0.4637161", "0.46306247", "0.46289295", "0.4626216", "0.46257946", "0.4624651" ]
0.57777196
2
Returns a set of test names that are formatted to a single test like 'tests.wedge100.test_eeprom.EepromTest.test_odm_pcb'
def get_all_platform_tests(self):
    for testitem in self.get_tests(self.discover_tests()):
        if not testitem:
            continue
        prefix = "tests." + self.platform + "."
        self.formatted_tests_set.append(
            prefix + self.format_into_test_path(testitem)
        )
    if self.denylist:
        try:
            with open(self.denylist, "r") as f:
                denylist = f.read().splitlines()
        except FileNotFoundError:
            denylist = []
        self.formatted_tests_set = [
            t for t in self.formatted_tests_set if t not in denylist
        ]
    return self.formatted_tests_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_name(self):\r\n parts = []\r\n if self.test.__module__ != '__main__':\r\n parts.append(self.test.__module__)\r\n if hasattr(self.test, 'im_class'):\r\n parts.append(self.test.im_class.__name__)\r\n parts.append(self.test.__name__)\r\n return '.'.join(parts)", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def get_a_list_of_testset_names() -> str:\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))\n return message", "def split(test_name):\n recipe, simple_test_name = test_name.split('.', 1)\n return recipe, simple_test_name", "def PytestNameToLabel(pytest_name):\n def _GuessIsAcronym(word):\n return not word.isalpha() or all(c not in 'aeiouy' for c in word)\n\n pytest_name = pytest_name.replace('.', ' ').replace('_', ' ')\n parts = []\n seen = set()\n for part in pytest_name.split():\n if part in seen:\n continue\n seen.add(part)\n parts.append(\n FactoryTest._PYTEST_LABEL_MAP.get(\n part, part.upper() if _GuessIsAcronym(part) else part.title()))\n return ' '.join(parts)", "def list_feature_tests(self):\n\t\treturn self.test_names", "def parse_gtest_tests(gtest_output_raw: str):\n test_list = []\n current_test_prefix = ''\n gtest_output_split = gtest_output_raw.split('\\n')\n current_index = 0\n # skip to the actual test list\n while current_index < len(gtest_output_split):\n current_string = gtest_output_split[current_index]\n test_matches = re.findall(r'^[a-zA-Z]*\\.$', current_string)\n if len(test_matches) != 0:\n break\n current_index += 1\n while current_index < len(gtest_output_split):\n current_string = gtest_output_split[current_index]\n if len(current_string) == 0:\n current_index += 1\n continue\n # get the test name\n test_match = re.findall(r'^\\s*\\S*', current_string)[0].replace(' ', '')\n if test_match[len(test_match) - 1] == '.':\n # We've found a new prefix\n current_test_prefix = test_match\n current_index += 1\n continue\n test_list.append(current_test_prefix + test_match)\n current_index += 1\n return test_list", "def flat_test_name(_id):\n return \"-\".join(_id.split(\".\")[1:])", "def name_python_package_tests(self) -> str:\n return f'test_{self.name}'", "def get_test_modules_names() -> typing.List[str]:\n\n from services.meter.tests.unit import constants_for_tests\n return constants_for_tests.TESTS_MODULES", "def RemoveAllPrefixesFromGTestName(test):\n test_name_start = max(test.find('.'), 0)\n if test_name_start == 0:\n return test\n\n test_suite = test[:test_name_start]\n test_name = test[test_name_start + 1:]\n\n for prefix in _GTEST_PREFIXES:\n while test_name.startswith(prefix):\n test_name = test_name[len(prefix):]\n\n 
base_test = '%s.%s' % (test_suite, test_name)\n return base_test", "def getTestCaseNames(self, testCaseClass):\r\n def isTestMethod(attrname, testCaseClass=testCaseClass,\r\n prefix=self.testMethodPrefix):\r\n return attrname.startswith(prefix) and \\\r\n hasattr(getattr(testCaseClass, attrname), '__call__')\r\n testFnNames = filter(isTestMethod, dir(testCaseClass))\r\n if self.sortTestMethodsUsing:\r\n testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))\r\n return testFnNames", "def _testcase_name(testcase):\n name = os.path.splitext(os.path.basename(testcase))[0]\n name = name.replace('-', '_')\n name = 'test_{name}'.format(name=name)\n\n assert name.isidentifier()\n\n return name", "def _generate_test_name(source):\n out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()\n return \"test_%s\" % out", "def create_test_file_name(test_file):\n 'test.{}'.format(test_file.replace('.py', ''))", "def test_correct(self):\n tests = [\n 'test.1',\n 'test.2',\n ]\n expected = 'test.1:test.2'\n\n self.assertEqual(test_apps.get_gtest_filter(tests), expected)", "def get_helper_names(self):\n names = []\n\n if self.replay_source is None or self.test_type == 'JOINT':\n return names\n\n for c in self.get_helpers_classes():\n helper_name = \"P\" + str(self.parameters_common_index) + \".\" + \\\n str(self.parameters_fs_index) + \".\" + \\\n str(self.parameters_helper_index) + \"_E\" + \\\n str(self.get_encoder_number()) + \"_C\" + str(c[0]) + \\\n \"-\" + str(c[-1])\n\n names.append(helper_name)\n\n return names", "def _get_resource_test_names(self):\n\t\ttests = []\n\t\tfor resource in self.resources:\n\t\t\tpath = os.path.join(self.history_path, '*', '*', resource)\n\t\t\tself.logger.info(\"Looking for Inca tests in %s\" % path)\n\t\t\ttests.extend(glob.glob(path))\n\t\treturn tests", "def visitTests(tests, grepStr=''):\n\n # First flatten the list of tests.\n testsFlat = []\n toCheck = [t for t in tests]\n while toCheck:\n test = toCheck.pop()\n if isinstance(test, unittest.TestSuite):\n toCheck += [t for t in test]\n else:\n if grepStr in str(type(test)):\n testsFlat.append(test)\n testsFlat.sort()\n\n # Follow the flattened list of tests and show the module, class\n # and name, in a nice way.\n lastClass = None\n lastModule = None\n \n grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)\n\n for t in testsFlat:\n moduleName, className, testName = t.id().rsplit('.', 2)\n \n # If there is a failure loading the test, show it\n if moduleName.startswith('unittest.loader.ModuleImportFailure'):\n print red(moduleName), \" test:\", t.id()\n continue\n\n if moduleName != lastModule:\n lastModule = moduleName\n print(\" - From %s.py (to run all use --allPrograms)\"\n % '/'.join(moduleName.split('.')) + grepPrint)\n\n\n if className != lastClass:\n lastClass = className\n print(\" ./xmipp test %s\" % className)", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def test_correct(self):\n tests = [\n 'KIF.test1',\n 'KIF.test2',\n ]\n expected = 'NAME:test1|test2'\n\n self.assertEqual(expected, test_apps.get_kif_test_filter(tests))", "def _RemoveAllPrefixes(test):\n test_name_start = max(test.find('.'), 0)\n if test_name_start == 0:\n return test\n\n test_suite = test[: test_name_start]\n test_name = test[test_name_start + 1 :]\n pre_position = test_name.find(_PRE_TEST_PREFIX)\n while 
pre_position == 0:\n test_name = test_name[len(_PRE_TEST_PREFIX):]\n pre_position = test_name.find(_PRE_TEST_PREFIX)\n base_test = '%s.%s' % (test_suite, test_name)\n return base_test", "def format_into_test_path(self, testitem):\n test_string = testitem.split(\"(\")\n test_path = test_string[1].split(\")\")[0]\n \"\"\"\n Test item has different path with different python version\n example:\n on 3.8 set_psu_cmd (tests.fuji.test_psu.Psu1Test)\n on 3.11 set_psu_cmd (tests.fuji.test_psu.Psu1Test.set_psu_cmd)\n \"\"\"\n if test_path.split(\".\")[-1].strip() != test_string[0].strip():\n test_path = test_path + \".\" + test_string[0]\n\n return test_path.strip()", "def FindTestName(self, test_constructors, args):\n test_name = None\n for arg in [self.GetModernizedTestName(a) for a in args]:\n if arg in test_constructors:\n test_name = arg\n\n return test_name", "def testTitleTemplateFindNames(self):\n\n\t\ttests = {\n\t\t\t'${abc.def.1}-$abc-${123}': {\n\t\t\t\t'abc.def.1': ['abc', 'def', 1],\n\t\t\t\t'123': [123]\n\t\t\t},\n\t\t\t'${abc..def} $$ ${qwe}': {'qwe': ['qwe']}\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.getFieldNames(), tests[test])", "def extract_test_name(base_path):\n name = p.basename(base_path)\n if name == \"test.py\":\n name = \"\"\n elif name.startswith(\"test_\") and name.endswith(\".py\"):\n name = name[len(\"test_\") : (len(name) - len(\".py\"))]\n return name", "def get_validation_file_name(self):\n name = self.test_name + \" (T\" + str(self.test_index) + \"_P\" + str(self.parameters_common_index) + \".\" + \\\n str(self.parameters_fs_index) + \".\" + \\\n str(self.parameters_helper_index) + \".\" + \\\n str(self.parameters_incremental_index)\n\n if self.replay_source is not None:\n name = name + \"_\"+ self.replay_source\n\n if self.helper_decoders_one_class:\n name = name + \"_1\"\n\n name = name + \")\"\n\n return name", "def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name", "def test_suite_name(self) -> str:\n return pulumi.get(self, \"test_suite_name\")", "def get_tested_endpoint_names(db_session):\n results = db_session.query(TestEndpoint).join(TestEndpoint.endpoint).group_by(TestEndpoint.endpoint_id).all()\n return [result.endpoint.name for result in results]", "def testGetAllPhEDExNodeNames(self):\n result = self.mySiteDB.getAllPhEDExNodeNames(excludeBuffer=True)\n self.assertFalse([pnn for pnn in result if pnn.endswith('_Buffer')])\n\n result = self.mySiteDB.getAllPhEDExNodeNames(excludeBuffer=False)\n self.assertTrue(len([pnn for pnn in result if pnn.endswith('_Buffer')]) > 5)\n\n result = self.mySiteDB.getAllPhEDExNodeNames(pattern='T1.*', excludeBuffer=True)\n self.assertFalse([pnn for pnn in result if not pnn.startswith('T1_')])\n self.assertTrue(len(result) > 10)\n\n result = self.mySiteDB.getAllPhEDExNodeNames(pattern='.*', excludeBuffer=True)\n self.assertTrue([pnn for pnn in result if pnn.startswith('T1_')])\n self.assertTrue([pnn for pnn in result if pnn.startswith('T2_')])\n self.assertTrue([pnn for pnn in result if pnn.startswith('T3_')])\n self.assertTrue(len(result) > 60)\n\n return", "def get_java_name(test):\n\n basename = os.path.basename(test)\n\n # note that this is fragile, but we're being conservative here\n name = basename.split('.')[0]\n\n # build the numeral-number table\n nums = {'0' : \"Zero\",\n '1' : \"One\",\n '2' : \"Two\",\n '3' : \"Three\",\n '4' : \"Four\",\n '5' : \"Five\",\n '6' : \"Six\",\n '7' : 
\"Seven\",\n '8' : \"Eight\",\n '9' : \"Nine\"}\n\n # do our replacement\n for k, v in nums.items():\n name = name.replace(k, v)\n\n # do the stripping split to obtain our fragments\n undesired_chars = '[' + string.whitespace + string.punctuation + ']'\n fragments = re.split(undesired_chars, name)\n\n # capitalize each fragment\n fragments = [f.capitalize() for f in fragments]\n\n # check the first and last fragments\n if fragments[0] != 'Webkit':\n fragments.insert(0, 'Webkit')\n\n if fragments[-1] != 'Test':\n fragments.append('Test')\n\n # join the results\n return ''.join(fragments)", "def testbed_name(self): \n return \"C-Lab\"", "def find_tests(testdir,\n prefixes=DEFAULT_PREFIXES, suffix=\".py\",\n excludes=(),\n remove_suffix=True):\n tests = []\n for name in os.listdir(testdir):\n if not suffix or name.endswith(suffix):\n for prefix in prefixes:\n if name.startswith(prefix):\n if remove_suffix and name.endswith(suffix):\n name = name[:-len(suffix)]\n if name not in excludes:\n tests.append(name)\n tests.sort()\n return tests", "def test_cases(self) -> list[str]:\n cases = []\n for t in self._test_cases:\n if t not in cases:\n cases.append(t)\n return cases", "def get_test_files():\n test_files = os.listdir('./test')\n return [\n create_test_file_name(test_file)\n for test_file in test_files\n if is_valid_test_file(test_files)\n ]", "def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):\n if strip_tags:\n tags_start = name.find('[')\n tags_end = name.find(']')\n if tags_start > 0 and tags_end > tags_start:\n newname = name[:tags_start]\n newname += name[tags_end + 1:]\n name = newname\n\n if strip_scenarios:\n tags_start = name.find('(')\n tags_end = name.find(')')\n if tags_start > 0 and tags_end > tags_start:\n newname = name[:tags_start]\n newname += name[tags_end + 1:]\n name = newname\n\n return name", "def get_names(self):\n\n return self.mod_suites.keys()", "def list_tests(tests_module,\n test_module_names=None, test_class_map=None, skip_class_map=None):\n tests = load_tests(tests_module, test_module_names, test_class_map, skip_class_map)\n for test_class in tests:\n print(cmd.COLORS['title'](test_class.__name__) + ':')\n test_cases = unittest.loader.getTestCaseNames(test_class, 'test')\n for test_case in test_cases:\n print(textwrap.indent(test_case, cmd.INDENT))", "def find_all_test_files():\n #test_file_pattern = re.compile('^t(est)?_.*\\.py$')\n test_file_pattern = re.compile('.*_test\\.py$')\n is_test_file = lambda filename: test_file_pattern.match(filename)\n drop_dot_py = lambda filename: filename[:-3]\n join_module = lambda *names: '/'.join(names)\n\n modules = []\n for root, dirs, files in os.walk(os.curdir):\n root_name = os.path.split(root)[-1]\n for test_file in filter(is_test_file, files):\n module = join_module(root_name, drop_dot_py(test_file))\n modules.append(module)\n #modules += ['.'.join([root_name, drop_dot_py(test_file)]) for test_file in filter(is_test, files)]\n return modules", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def generate_test_string(self, test_log: dict, step: Union[int,None] = None) -> str:\n print_str = 'Epoch {}, '.format(step) if step else ''\n for k,v in test_log.items():\n print_str = print_str + '{} : {:.4f}, '.format(k, v)\n \n return print_str[:-1]", "def generate_test_list(tdir):\n\n # Skip this if it already exists\n if os.path.exists(os.path.join(tdir.name, \"kstest-list\")):\n return\n\n kstest_log = 
os.path.join(tdir.name, \"kstest.log\")\n with open(kstest_log) as f:\n for line in f.readlines():\n if not line.startswith(\"Running tests: \"):\n continue\n\n tests = [os.path.basename(os.path.splitext(s)[0]) for s in line[15:].split()]\n with open(os.path.join(tdir.name, \"kstest-list\"), \"wt\") as klf:\n for t in tests:\n print(t, file=klf)\n break", "def load_test_subjects_names(self):\n files = os.listdir(os.path.join(self.db_path, self.test_batch))\n for f in files:\n if f.startswith('test-volume'):\n s_name = str.split(str.split(f, '.')[0], '-')[-1]\n self.testing_subjects.append(s_name)\n self.n_test = len(self.testing_subjects)", "def name(self):\n return self._name or ' & '.join(s.name for s in self.suites)", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def test():\n\t\treturn [\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]", "def test_excalibur_name():\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"", "def get_named_suites():\n\n # Skip \"with_server\" and 
\"no_server\" because they do not define any test files to run.\n executor_only = set([\"with_server\", \"no_server\"])\n suite_names = [suite for suite in resmokeconfig.NAMED_SUITES if suite not in executor_only]\n suite_names.sort()\n return suite_names", "def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__", "def get_parameterized_names():\n return [name.split('.')[0] for name in os.listdir(os.path.dirname(__file__) + '/../test_schemas')\n if 'mixins' not in name]", "def gettestinfos_side_effect(test_names, test_mapping_test_details=None):\n test_infos = set()\n for test_name in test_names:\n if test_name == uc.MODULE_NAME:\n test_infos.add(uc.MODULE_INFO)\n if test_name == uc.CLASS_NAME:\n test_infos.add(uc.CLASS_INFO)\n return test_infos", "def test_title(names):", "def get_test_case_filenames(program, test_case):\n\n filenames = {}\n for f in INFO[program].test_cases[test_case]:\n filenames[f] = os.path.join(INFO[program].base_folder, test_case, f)\n\n return filenames", "def test_get_namespaces_names(self):\n pass", "def get_functional_test_cases(test_suite):\n return get_cases(test_suite, r'test_(?!perf_)')", "def get_cases(test_suite, test_name_regex):\n cases = []\n for test_case_name in dir(test_suite):\n test_case = getattr(test_suite, test_case_name)\n if callable(test_case) and re.match(test_name_regex, test_case_name):\n cases.append(test_case_name)\n\n return cases", "def get_test_functions():\r\n\r\n test_funcs = [obj for name,obj in inspect.getmembers(sys.modules[__name__])\r\n if (inspect.isfunction(obj) and name.startswith('test'))]\r\n src = inspect.getsource(sys.modules[__name__])\r\n lines = src.split('\\n')\r\n\r\n # Create a dictionary with key=function name and value is 0-based order\r\n # in the module\r\n ordered_func_names = dict()\r\n ordered_funcs = list()\r\n func_index = 0\r\n for line in lines:\r\n if line.find(\"def test\") > -1 and not line.find('line.find') > -1:\r\n func_name = line.split(\"(\")[0].split()[1]\r\n ordered_func_names[func_name] = func_index\r\n # Create an empty list with sampe number of elements as test\r\n # functions\r\n ordered_funcs.append('')\r\n func_index += 1\r\n for test_func in test_funcs:\r\n index = ordered_func_names[test_func.__name__]\r\n ordered_funcs[index] = test_func\r\n return ordered_funcs", "def test_class_module_names(dependency_testing_model) -> None:\n expected_modules = {\n 'builtins',\n 'calendar',\n 'click',\n 'cloudpickle',\n 'collections',\n 'datetime',\n 'google',\n 'json',\n 'numpy',\n 'pandas',\n 'PIL',\n 'requests',\n 'requests',\n 'sklearn',\n 'torch',\n 'typing',\n 'urllib3',\n 'verta',\n 'yaml',\n }\n extracted_modules: Set[str] = md.class_module_names(dependency_testing_model)\n assert set(extracted_modules) == set(expected_modules)", "def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")", "def get_test_labels(self):\n raise NotImplementedError", "def get_testbench_name(self, tb_type: str) -> str:\n return f'{self._meas_name}_TB_{tb_type}'", "def get_test_method_name(cls):\n loader = unittest.loader.TestLoader()\n test_names = loader.getTestCaseNames(cls)\n\n if len(test_names) != 1:\n raise AttributeError(\"Component %r has illegal number of test \"\n \"methods : %s\" % (cls.__name__, test_names))\n\n return test_names[0]", "def test_get_filename_with_new_ext(self):\r\n test_paths = [('/from/root/test.xxx', 
'test.yyy'),\r\n ('../relative/path/test.xxx', 'test.yyy'),\r\n ('/double/extension/in/filename/test.zzz.xxx',\r\n 'test.zzz.yyy')]\r\n\r\n for input, exp_output in test_paths:\r\n exp_output = join(self.output_dir, exp_output)\r\n\r\n self.assertEquals(\r\n get_filename_with_new_ext(input, '.yyy', self.output_dir),\r\n exp_output)", "def print_test_details(scenario, days, test_name, buf):\n for d in days:\n if scenario not in days[d]:\n continue\n\n for n in days[d][scenario][test_name]:\n print(f\"\\n{n}:\", file=buf)\n for test in days[d][scenario][test_name][n]:\n if \"start_time\" not in test:\n start_time = \"\"\n else:\n start_time = datetime.fromtimestamp(test[\"start_time\"]).strftime(\"%m/%d/%Y %H:%M:%S\")\n\n if \"elapsed_time\" not in test:\n elapsed_time = 0\n else:\n elapsed_time = test[\"elapsed_time\"]\n\n # Get the result message\n msg = test[\"result\"].rsplit(\"FAILED:\")[-1]\n print(f' {start_time} ({elapsed_time}s): {msg}', file=buf)", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files", "def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"", "def full_names(self) -> List[str]:\n self.names = [\n \".\".join(prod)\n for prod in product(*self._namespaces, self.terminals)\n ]\n return self.names", "def get_test_method_name(self) -> str:\n return self._testMethodName", "def target_test_file_name():\n return 'test'", "def named(path):\n return re.findall(r'.*(test\\d+)\\.out', path)[0]", "def getNames():\n\n return ('run', 'Run Network')", "def get_resource_testbed_name(cls, urn):\n t_urn = urn.split(\"+\")\n # urn:publicid:IDN+fuseco.fokus.fraunhofer.de+node+epc_measurement_server\n # urn:publicid:IDN+wall2.ilabt.iminds.be+node+n097-10b\n return t_urn[1]", "def generate_test_cohort_name(project):\n return 'testcohort_{0}_{1}'.\\\n format(project,\n format_mediawiki_timestamp(datetime.now()))", "def extract_suite_name(file_path, project_name):\n\n suite_name = str(project_name) + \".\"\n suite_name = suite_name + os.path.splitext(str(file_path).replace(os_sep, \".\"))[0]\n return suite_name", "def __init_key_value_fmts__(self):\n self.test_desc_fmt = \"{0}.description\"\n self.test_svc_name_fmt = \"{0}.svc\"\n self.test_svc_ipc_id_fmt = \"{0}.svc.{1}.ipc_id\"\n self.test_svc_rsp_code_fmt = \"{0}.svc.{1}.svc_rsp_code\"\n self.test_svc_rsp_stage_fmt = \"{0}.svc.{1}.svc_rsp_stage\"\n self.test_pre_hook_stage_fmt = \"{0}.pre_hook.stage\"\n self.test_pre_hook_script_fmt = \"{0}.pre_hook.script\"\n self.test_post_hook_stage_fmt = \"{0}.post_hook.stage\"\n self.test_post_hook_script_fmt = \"{0}.post_hook.script\"\n self.test_svc_rsp_time_delay_fmt = \"{0}.svc.{1}.rsp_time_delay\"\n\n self.test_expected_result_fmt = \"{0}.expected_result\"\n self.test_exp_api_return_fmt = \"{0}.expected_result.api_return_code\"\n self.test_exp_log_file_fmt = \"{0}.expected_result.log\"\n self.test_exp_log_fmt = \"{0}.expected_result.log.{1}\"", "def get_replay_helper_tfrecords_names(self):\n names = []\n\n if self.replay_source is None or self.test_type == 'JOINT':\n return names\n\n \n helper_classes = self.get_helpers_classes()\n\n for hc in helper_classes:\n current_names = []\n for c in hc:\n name = \"P\" + str(self.parameters_common_index) + \".\" + \\\n str(self.parameters_fs_index) + \".\" + 
\\\n str(self.parameters_helper_index) + \\\n \"_E\" + str(self.get_encoder_number()) + \\\n \"_H\" + str(self.get_helper_size()) + \\\n \"_C\" + str(c) + \"_\" + self.replay_source\n current_names.append(name)\n names.append(current_names)\n\n return names", "def test_create_final_name(self):\n \n date = \"111111\"\n fcid = \"A11A22BCXX\"\n sample_name = \"P101_150B_index5\"\n \n test_names = [(\"1_{}_{}_1_nophix_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"1_{}_{}_1_nophix_1_fastq.txt\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq\".format(date,fcid,sample_name)),\n (\"1_{}_{}_1_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_NoIndex_L001_R2_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_2.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq..gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq\".format(sample_name),\n \"1_{}_{}_{}_1.fastq\".format(date,fcid,sample_name))]\n \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))\n \n # Try without the _index part of file name\n sample_name_noindex = \"P101_150\"\n test_names = [(\"1_{}_{}_1_nophix_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name_noindex)),\n (\"{}_CGATGT_L001_R1_001.fastq.gz\".format(sample_name_noindex),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name_noindex)),\n (\"{}_NoIndex_L001_R2_001.fastq.gz\".format(sample_name_noindex),\n \"1_{}_{}_{}_2.fastq.gz\".format(date,fcid,sample_name_noindex))]\n \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name_noindex)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))\n \n # Try some illegal file names and assert that they raise exceptions\n test_names = [\"1_{}_{}_1_nophix_1_fastq.gz\".format(date,fcid),\n \"a_{}_{}_1_nophix_1_fastq.txt\".format(date,fcid),\n \"{}_CGATRGT_L1_R1_001.fastq.gz\".format(sample_name)]\n for test_name in test_names:\n with self.assertRaises(ValueError):\n create_final_name(test_name,date,fcid,sample_name)\n \n # Try a file with undetermined reads\n sample_name = \"lane1\"\n test_names = [(\"{}_Undetermined_L001_R1_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),] \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))", "def print_tests(pkg):\n\n # Some built-in base packages (e.g., Autotools) define callback (e.g.,\n # check) inherited by descendant packages. 
These checks may not result\n # in build-time testing if the package's build does not implement the\n # expected functionality (e.g., a 'check' or 'test' targets).\n #\n # So the presence of a callback in Spack does not necessarily correspond\n # to the actual presence of built-time tests for a package.\n for callbacks, phase in [\n (pkg.build_time_test_callbacks, \"Build\"),\n (pkg.install_time_test_callbacks, \"Install\"),\n ]:\n color.cprint(\"\")\n color.cprint(section_title(\"Available {0} Phase Test Methods:\".format(phase)))\n names = []\n if callbacks:\n for name in callbacks:\n if getattr(pkg, name, False):\n names.append(name)\n\n if names:\n colify(sorted(names), indent=4)\n else:\n color.cprint(\" None\")\n\n # PackageBase defines an empty install/smoke test but we want to know\n # if it has been overridden and, therefore, assumed to be implemented.\n color.cprint(\"\")\n color.cprint(section_title(\"Stand-Alone/Smoke Test Methods:\"))\n names = []\n pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__\n if has_test_method(pkg_cls):\n pkg_base = spack.package_base.PackageBase\n test_pkgs = [\n str(cls.test)\n for cls in inspect.getmro(pkg_cls)\n if issubclass(cls, pkg_base) and cls.test != pkg_base.test\n ]\n test_pkgs = list(set(test_pkgs))\n names.extend([(test.split()[1]).lower() for test in test_pkgs])\n\n # TODO Refactor START\n # Use code from package_base.py's test_process IF this functionality is\n # accepted.\n v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))\n\n # hack for compilers that are not dependencies (yet)\n # TODO: this all eventually goes away\n c_names = (\"gcc\", \"intel\", \"intel-parallel-studio\", \"pgi\")\n if pkg.name in c_names:\n v_names.extend([\"c\", \"cxx\", \"fortran\"])\n if pkg.spec.satisfies(\"llvm+clang\"):\n v_names.extend([\"c\", \"cxx\"])\n # TODO Refactor END\n\n v_specs = [spack.spec.Spec(v_name) for v_name in v_names]\n for v_spec in v_specs:\n try:\n pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)\n if has_test_method(pkg_cls):\n names.append(\"{0}.test\".format(pkg_cls.name.lower()))\n except spack.repo.UnknownPackageError:\n pass\n\n if names:\n colify(sorted(names), indent=4)\n else:\n color.cprint(\" None\")", "def testGetEpisodeName(self):\n\t\tfor case in self.testCases:\n\t\t\tassert case['title'] == getEpisodeName( case['show'], case['season'], case['episode'])", "def testName(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"name\")\n\n self.util.stringPropertyTest(self, project, \"name\")", "def extract_test_name(filename, line_number):\n\n with open(filename) as f:\n lines = f.readlines()\n tests_begins = next((x for x in xrange(len(lines)) if lines[x].startswith(\"*** Test Cases ***\")), -1)\n if tests_begins == -1:\n raise Exception(\"No Test Case Section\")\n tests_ends = next((x for x in xrange(tests_begins + 1, len(lines)) if lines[x][0:3] == \"***\"), len(lines))\n\n if line_number <= tests_begins or line_number >= tests_ends:\n raise Exception(\"Outside of test cases section\")\n\n test_case_line = next(\n (x for x in xrange(line_number, tests_begins, -1) if len(lines[x]) > 1 and lines[x][0] != ' '), -1)\n if test_case_line == -1:\n raise Exception(\"No test case found in Test Cases section\")\n\n return str(lines[test_case_line]).strip()", "def name() -> str:\n return \"test-helper-nuke\"", "def GetTestSuiteName(normalized_test_name, step_ui_name):\n # For Webkit layout tests, the suite name is the immediate directory.\n if 
'webkit_layout_tests' in step_ui_name:\n index = normalized_test_name.rfind('/')\n if index > 0:\n return normalized_test_name[:index]\n return None\n\n # For gtests, the suite name is the class name.\n gtest_match = GTEST_REGEX.match(normalized_test_name)\n if gtest_match:\n return gtest_match.group(1)\n\n # For Java tests, the suite name is the class name.\n java_match = _JAVA_TEST_REGEX.match(normalized_test_name)\n if java_match:\n return java_match.group(1)\n\n return None", "def testUseAltNamingThree(self):\n expected = (IMAGE_GSD_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 3)\n self.assertEqual(expected, actual)", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def testGetStageNamesSmoke(self):\n stage = self.ConstructStage()\n self.assertEqual(stage.GetStageNames(), ['Builder'])", "def testUseDefaultNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 0)\n self.assertEqual(expected, actual)", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def prepend_type(prefix, test_list):\n br_test = 'buildrunner_tests'\n return (\n ['%s_%s' % (prefix, value) for value in test_list if value != br_test] +\n filter(br_test.__eq__, test_list)) # Add back in buildrunner_tests.", "def get_nice_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[1])\n return result", "def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')", "def getTestSuite():\n test_suite = unittest.TestSuite([])\n\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistReaders))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPySnpTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFileCache))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestUtilTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestIntRangeSet))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKrDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpGen))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGenerate))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstMemMap))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpMemMap))\n 
test_suite.addTests(NaNCNCTestCases.factory_iterator())\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstReader))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKernelReader))\n\n return test_suite", "def test_reffs(self):\n self.assertEqual((\"1\" in list(map(lambda x: str(x), self.TEI.reffs))), True)\n self.assertEqual((\"1.pr\" in list(map(lambda x: str(x), self.TEI.reffs))), True)\n self.assertEqual((\"2.40.8\" in list(map(lambda x: str(x), self.TEI.reffs))), True)", "def list_tests(self, executable):\n # This will return an exit code with the number of tests available\n try:\n output = subprocess.check_output(\n [executable, \"--list-test-names-only\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n except subprocess.CalledProcessError as e:\n output = e.output\n\n result = output.strip().split(\"\\n\")\n\n return result", "def source_test_file_name():\n return 'feature'", "def get_simple_test_order(docopt_args):\n test_order = ['screen_config', 'stabilization', 'manual_ccf_default', 'manual_ccf_brightest']\n if docopt_args['--hdr']:\n test_order += ['manual_ccf_hdr']\n \n test_order += ['lum_profile']\n \n abc_def_tests = {\n True: ['default', 'default_100', 'default_35', 'default_12', 'default_3'],\n False: ['default', 'default_low_backlight']\n }\n test_order += abc_def_tests[bool(docopt_args['--defabc'])]\n \n abc_br_tests = {\n True: ['brightest', 'brightest_100', 'brightest_35', 'brightest_12', 'brightest_3'],\n False: ['brightest', 'brightest_low_backlight']\n }\n test_order += abc_br_tests[bool(docopt_args['--brabc'])]\n \n if docopt_args['--hdr']:\n abc_hdr_tests = {\n True: ['hdr10', 'hdr10_100', 'hdr10_35', 'hdr10_12', 'hdr10_3'],\n False: ['hdr10', 'hdr10_low_backlight']\n }\n test_order += abc_hdr_tests[bool(docopt_args['--hdrabc'])]\n return test_order" ]
[ "0.7208352", "0.6553833", "0.65289885", "0.6420306", "0.6344564", "0.62654716", "0.62462103", "0.62398034", "0.6210148", "0.6169689", "0.60774684", "0.60527426", "0.5981892", "0.5960162", "0.59601563", "0.59381056", "0.58674467", "0.5789974", "0.5787742", "0.5770073", "0.5759004", "0.5707391", "0.5698837", "0.56868416", "0.5676922", "0.5657877", "0.56458765", "0.55823374", "0.5569043", "0.5551881", "0.55257964", "0.5514843", "0.5495817", "0.5486387", "0.5464065", "0.54589504", "0.544361", "0.5441347", "0.54088634", "0.5376224", "0.53724194", "0.5371659", "0.53688765", "0.5340431", "0.5329944", "0.53207517", "0.5310421", "0.53097296", "0.5306881", "0.5300024", "0.5285349", "0.5269498", "0.52612054", "0.522283", "0.52177846", "0.52037144", "0.5196651", "0.5193654", "0.5185654", "0.51669604", "0.516664", "0.51620954", "0.5157338", "0.5150494", "0.51326424", "0.5130749", "0.5126015", "0.5117643", "0.51169187", "0.5115271", "0.51149744", "0.5109098", "0.5104199", "0.50764215", "0.5076154", "0.5075155", "0.5074046", "0.5063929", "0.50530136", "0.5052361", "0.50455093", "0.50348705", "0.5029533", "0.50290334", "0.5027203", "0.50263536", "0.5021889", "0.5021324", "0.50192183", "0.5008364", "0.50074583", "0.5005789", "0.5005608", "0.50020677", "0.49967742", "0.49953383", "0.49934998", "0.49932495", "0.49923494", "0.49919403" ]
0.5076667
73
Tests that trigger from outside BMC need Hostname information to BMC and userver
def set_external(args):
    if args.host:
        os.environ["TEST_HOSTNAME"] = args.host
        # If user gave a hostname, determine oob name from it and set it.
        if "." in args.host:
            index = args.host.index(".")
            os.environ["TEST_BMC_HOSTNAME"] = (
                args.host[:index] + "-oob" + args.host[index:]
            )
    if args.bmc_host:
        os.environ["TEST_BMC_HOSTNAME"] = args.bmc_host
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_host(self):\n pass", "def test_perform_host_action(self):\n pass", "def test_get_host_access(self):\n pass", "def test(cls, hostname):\n pass", "def test_hostname_value(self):\n \n hostname = get_hostname()\n \n # Check to make sure the hostname is \"tjw-imac.grid.labs\"\n self.assertEqual(hostname, 'tjw-imac.grid.labs')", "def getHost():", "def getHost():", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {\"http.server_name\": hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def checkGetHostByName(self, result):\n self.assertEquals(result, '127.0.0.1')", "def setUp(self):\n client.theResolver = FakeResolver()\n self.hostname = 'example.com'\n self.ghbntest = 'getHostByNameTest'", "def test_host_validation(runner: CliRunner) -> None:\n invalid_res = runner.invoke(cli.main, [\"-b\", \"1.2.3.4.5\"])\n assert invalid_res.exit_code == 2\n assert 'Invalid value for \"-b\" / \"--bind-address\"' in invalid_res.output\n assert \"'host' is invalid in configuration\" in invalid_res.output", "def test_download_host(self):\n pass", "def test_hello_single_host(self):\n args = Namespace()\n args.topic = '/canyouhearme'\n args.emit_period = 0.1\n args.print_period = 1.0\n args.ttl = None\n args.once = True\n with mock.patch('socket.gethostname', return_value='!nv@lid-n*de-n4me'):\n summary = SummaryTable()\n hello_verb = HelloVerb()\n hello_verb.main(args=args, summary_table=summary)\n expected_summary = _generate_expected_summary_table()\n self.assertEqual(summary._pub, expected_summary._pub)\n self.assertEqual(summary._sub, expected_summary._sub)\n self.assertEqual(summary._send, expected_summary._send)\n self.assertEqual(summary._receive, expected_summary._receive)", "def test_server_info(self):\n pass", "def test_get_current_request_hostname(self):\r\n assert_is_none(get_current_request_hostname())", "def test_rebuilt_server_hostname(self):\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n hostname = remote_client.get_hostname()\n self.assertEqual(hostname, self.expected_name)", "def getHostInfo():", "async def test_error_invalid_host(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.asuswrt.config_flow.socket.gethostbyname\",\n side_effect=gaierror,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER},\n data=CONFIG_DATA,\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": \"invalid_host\"}", "def test_get_current_request_hostname(self):\n assert get_current_request_hostname() is None", "def test_get_internal_host(matrix):\n matrix.charm_config[\"prefer-internal-ip\"] = True\n matrix.charm_config[\"prefer-internal-host\"] = True\n assert 
matrix.get_internal_host() == \"10.10.10.10\"\n matrix.charm_config[\"prefer-internal-ip\"] = False\n assert matrix.get_internal_host() == \"mock.fqdn\"", "def test_getHostByName(self):\n d = client.getHostByName(self.ghbntest)\n d.addCallback(self.checkGetHostByName)\n return d", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_SERVER_NAME: hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def handle_hostname(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<ipnr>')\n return\n try:\n hostname = socket.gethostbyaddr(item)\n ievent.reply(hostname[0])\n except:\n ievent.reply(\"can't match \" + str(item))", "def test_vms_hosts(self):\n testflow.step(\"Check if VM's started on the same host\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_hosts(self):\n testflow.step(\"Check if VM's started on the same host\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_arguments_parser(self):\n self.assertEqual('monitoring-dc.app.corp',\n self.plugin.options.hostname)", "def handle_host(self, host):\n LOG.info('FakeHandler: handle host %s' % host)", "def usage(self, host):", "def test_post_accepts_known_host(self, publish_mock: mock.Mock) -> None:\n\n def side_effect(*args: str, **_: str) -> Any:\n if args[0] == \"registry:first:value\":\n return [\"00:00:00:00:00\"]\n if args[0] == \"app_url\":\n return [\"/\"]\n if args[0] == \"jinja:render\":\n return [\"\"]\n return mock.DEFAULT\n\n publish_mock.side_effect = side_effect\n\n response = self.request(\"/\", method=\"POST\", host=\"host1\")\n\n self.assertEqual(response.code, 303)", "def test_build_command_hostname(self):\n actual_result = IperfClientCommandBuilder()\\\n .set_server_hostname(SERVER_HOST)\\\n .build_client_command()\n self.assertListEqual(actual_result, ['iperf', '-c', 'server'])", "def test_error_when_bgp_ipv6_conflict(self, m_client):\n startup.hostname = \"not_host\"\n m_client.get_hostnames_from_ips = Mock()\n 
m_client.get_hostnames_from_ips.return_value = {\"abcd::beef\":\"host\"}\n self.assertRaises(SystemExit, startup.error_if_bgp_ip_conflict,\n None, \"abcd::beef\")", "def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)", "def testGetHostConfig(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n host = pool.GetHostConfig('crystalball1.atc.google.com')\n self.assertEqual('crystalball1.atc.google.com', host.hostname)\n self.assertEqual('lab_user1', host.host_login_name)\n self.assertEqual('crystalball', host.cluster_name)\n self.assertEqual('path/to/config.xml', host.tf_global_config_path)\n self.assertEqual('-F path/to/ssh/config', host.ssh_arg)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_get_host_configuration_metrics(self):\n pass", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def goodmorning(host):", "def test_add_hostname(self):\n hostname = 'test123.com'\n info = self.api.add_hostname(hostname, tags=['asd'])\n self.assertEqual(info['value'], hostname)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "async def test_user_flow_enters_dns_name(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"errors\"] == {}\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_HOST: \"ip.only\"},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"errors\"] == {\"base\": \"no_ip\"}\n\n with _patch_wizlight(), patch(\n \"homeassistant.components.wiz.async_setup_entry\",\n return_value=True,\n ) 
as mock_setup_entry, patch(\n \"homeassistant.components.wiz.async_setup\", return_value=True\n ) as mock_setup:\n result3 = await hass.config_entries.flow.async_configure(\n result2[\"flow_id\"],\n TEST_CONNECTION,\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == \"create_entry\"\n assert result3[\"title\"] == \"WiZ Dimmable White ABCABC\"\n assert result3[\"data\"] == {\n CONF_HOST: \"1.1.1.1\",\n }\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def test_init_correct_transport_bindaddr(self):\n os.environ = BASE_ENVIRON\n self.plugin.init([\"dummy\", \"boom\"])\n bindaddr = self.plugin.getBindAddresses()\n self.assertEqual(bindaddr[\"dummy\"], ('127.0.0.1', 5556))\n self.assertEqual(bindaddr[\"boom\"], ('127.0.0.1', 6666))\n self.assertOutputLinesStartWith(\"VERSION \")", "def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")", "def test_verify_connection_to_a_device():", "def test_host_header_mismatch_empty(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: \\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def check_hosts(zk,host_name,task,scheduler_log):\n\n #scheduler_log.debug(\"Scheduler Working...!!!\")\n try:\n #Leader Election\n leader = leaderCheck(zk=zk)\n #scheduler_log.debug(\"Leader Election Over\")\n #Update alive status to zookeeper - seems unnecessary\n imalive(zk=zk)\n #scheduler_log.debug(\"Alive Status Updated\")\n\n #If current Host is the Leader perform Scheduled Checks \n if (leader == host_name):\n scheduler_log.debug(\"%s : I am the Leader\"%host_name)\n\n #Fetch List of Hosts - From API\n host_dict = list_hosts(nova)\n allhosts = host_dict['all_list']\n api_down_nodes = host_dict['down_list']\n dishosts = host_dict['disabled_list']\n\n zk_all = zk.get_children(\"/openstack_ha/hosts/all\")\n zk_alive = zk.get_children(\"/openstack_ha/hosts/alive\")\n \n #Fetch Down nodes that are already Handeled - From Zookeeper\n zk_down = zk.get_children(\"/openstack_ha/hosts/down\")\n\n #Fetch nodes that are down and not already handled - From Zookeeper\n calculated_down_nodes = list(set(zk_all) - set(zk_alive))\n\n #Find Nodes Where Scheduler Only failed\n scheduler_down = list(set(calculated_down_nodes).difference(set(api_down_nodes)))\n for node in scheduler_down:\n scheduler_log.debug(\"HA Scheduler Failed on Node : %s \"%node)\n \n #Find Nodes Where API Only failed \n api_down = list(set(api_down_nodes).difference(set(calculated_down_nodes)))\n for node in api_down:\n scheduler_log.debug(\"API Failed on 
Node : %s \"%node)\n if node not in zk_all:\n scheduler_log.debug(\"HA Scheduler not even initialized %s\"%node)\n\n #Find nodes where both API and Zookeeper are failed \n api_scheduler_down = list(set(api_down_nodes).intersection(set(calculated_down_nodes)))\n\n # Possible Host states - Api only failure | Complete Host Failure ( Not yet Handled | Handling | Handled )\n if(len(api_scheduler_down))==0:\n scheduler_log.debug(\"Hosts working Normally....!!!\")\n else:\n scheduler_log.warning(\"More likely Disaster\")\n #skip if maintance\n # Here check the host in api_down_nodes(api) are present in calculated_down_nodes\n #if present start the instance migrations\n # Checking whether Cluster is Still under HA Policy\n # high availabity contiditions\n if len(api_scheduler_down) <= len(allhosts) - 1:\n scheduler_log.warn(\"Seems like Manageble Disaster\")\n for host in api_scheduler_down:\n scheduler_log.warning(\"Both Api and HA scheduler on\" +host+\" are down\")\n #checks whether down host from api is un handled(not present in down node calculate from zookeeper )\n #(host in zk_all and host not in zk_alive) == calculated_down_nodes\n if host in zk_down:\n #Node will present in zk_down only when all of it's instances are migrated\n scheduler_log.debug(\"Host %s Already handled...!!!!!\"%host)\n else:\n #Node down on api,zk and ( not handled | handling )\n if host not in dishosts:\n #Node Not disabled | disabled reason is not skippable\n scheduler_log.debug(host+\" is not disabled or reason is not maintenance\")\n if(zk.exists(\"/openstack_ha/hosts/time_out/\"+host)==None):\n scheduler_log.debug(\"Inside Time out Node Creation\")\n \n #adding host down time\n host_down_time = time.time()\n host_down_time = str.encode(str(host_down_time))\n scheduler_log.debug(host_down_time)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host, host_down_time)\n \n #adding time_suffix for json_dump file name\n temp_time=time.localtime(time.time()) \n time_suffix=str(temp_time.tm_mday)+\"_\"+str(temp_time.tm_mon)+\"_\"+\\\n str(temp_time.tm_year)+\"_\"+str(temp_time.tm_hour)+\"_\"+\\\n str(temp_time.tm_min)\n enc_time_suffix=str.encode(time_suffix)\n scheduler_log.debug(time_suffix)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\",enc_time_suffix)\n\n # call notification_mail(subj,msg) | Adding Down Node details to Notification \n try:\n subject = \"DGP Office VDI Node Down: %s\"%host\n message = \"Please Check the Network Connectivity and Powersupply as soon as possible\"\n notification_mail(subject,message,to_email=['naanalteam@naanal.in'])\n\n message = \"Please Contact System Administrator\"\n notification_mail(subject,message)\n scheduler_log.debug(\"mail in Scheduler...!\")\n except Exception as e:\n scheduler_log.debug(e)\n scheduler_log.debug(\"Error....! mail scheduler..!\")\n\n # add ping test\n ping_status=ping_check(host)\n if(ping_status):\n scheduler_log.debug(\"Not a Disaster\")\n scheduler_log.debug(\"ping test success....!!! Node is alive... 
Please Check the APIs,HA Scheduler and other Openstack Services\")\n\n else:\n scheduler_log.warning(\"Ping test also Failed on \"+host+\" proceed with migration\")\n if (zk.exists(\"/openstack_ha/hosts/start_migration/\"+ host)): # it checks the permission from the dashborad\n scheduler_log.warning(\" api down host :\"+host+\"present in zookeeper down_node:\")\n scheduler_log.debug(\"Strart migration....!!!!!\")\n scheduler_log.debug(\"migrating instances from the \"+host)\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode() \n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n #check for time out\n scheduler_log.debug(\"Checking Timeout for Down Node\",host)\n curent_time = time.time()\n if (zk.exists(\"/openstack_ha/hosts/time_out/\"+host)):\n down_host_failuretime = zk.get(\"/openstack_ha/hosts/time_out/\"+host)[0]\n down_host_failuretime = down_host_failuretime.decode(encoding='UTF-8')\n scheduler_log.warning(\"down_host_failuretime\",down_host_failuretime)\n down_host_failuretime = float(down_host_failuretime)\n time_interval = curent_time - down_host_failuretime\n if time_interval>migrate_time:\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode()\n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n scheduler_log.debug(\"Will Wait for another %d\"%(migrate_time-time_interval))\n else:\n scheduler_log.debug(\"%s Node Does'nt have TimeOut Value. Hence will not migrate forever\"%host)\n else:\n scheduler_log.debug(\"Host %s Under Maintenance\"%host)\n \n else:\n scheduler_log.warning(\"Un-Manageble Disaster Too many Nodes are down\")\n else:\n scheduler_log.debug(\"%s : Leader is %s\"%(host_name,leader))\n\n except Exception as e:\n if issubclass(e.__class__,kexception.NoNodeError):\n scheduler_log.exception(\"No node error\")\n elif any(issubclass(e.__class__, lv) for lv in kazoo_exceptions):\n scheduler_log.exception(\"Kazoo Exception.....: \")\n time.sleep(2)\n try:\n zk = KazooClient(hosts='127.0.0.1:2181')\n zk.start() \n Node_creation = createNodeinAll(zk=zk, host_name=host_name)\n election_Node = election_node(zk=zk, host_name=host_name)\n except:\n pass\n else:\n scheduler_log.warning(\"Unhandled Error \")\n scheduler_log.exception(\"\")", "def test_check_process_servers(self):\n self.cmd._process_servers(TEST_HOSTS, self.cloud_project)\n\n for host_id, test_host in TEST_HOSTS.items():\n host = CloudHost.objects.get(host_id=host_id)\n ips = host.ip_addresses\n self.assertEqual(host.hostname, test_host['hostname'])\n self.assertIn(test_host['tag'], host.tags.names())\n self.assertEqual(self.cloud_provider, host.cloudprovider)\n for ip in test_host['ips']:\n self.assertIn(ip, list(ips))\n self.assertEqual(host.hypervisor.hostname, test_host['hypervisor'])\n\n # check the creation date only for new hosts\n if host_id.find('_os_') != -1:\n self.assertEqual(\n datetime.strptime(\n test_host['created'],\n self.cmd.DATETIME_FORMAT\n ),\n host.created,\n )", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def test_vms_destination(self):\n testflow.step(\"Deactivate host %s\", conf.HOSTS[0])\n assert not ll_hosts.deactivate_host(positive=True, host=conf.HOSTS[0])", "def check_host(host):\n try:\n request = requests.get(host[0], timeout=3)\n host[1] = bool(re.search(host[1], request.text))\n except Exception:\n host[1] = False\n if host[1] is False:\n 
os.system(CONFIG['mail_command'].format(\n 'CRITICAL: {} is critical'.format(host[0])))\n\n return host", "def test_process_host_commands(self):\n\n command = [\"df\", \"-h\"]\n output = run(verification.process_host_commands(command))\n self.assertTrue(\"```\\nThat command is not available.```\" not in output)\n\n command = [\"ls\", \"-la\"]\n output = run(verification.process_host_commands(command))\n self.assertEqual(\"```\\nThat command is not available.```\", output)", "def test_get_host_configuration_metrics1(self):\n pass", "def test_host_header_mismatch(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def echo_host():\n run('echo %(settings)s; echo %(hosts)s' % env)", "def getRemoteHost():", "def getHost(self): #$NON-NLS-1$\r", "def test_default_host_http_required(self):\n client = self.base_scenario(\n frang_config=\"\", requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"]\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def test_hello_single_host():\n args = Namespace()\n args.topic = '/canyouhearme'\n args.emit_period = 0.1\n args.print_period = 1.0\n args.ttl = None\n args.once = True\n hello_verb = HelloVerb()\n summary = hello_verb.main(args=args)\n expected_summary = _generate_expected_summary_table()\n assert summary._pub == expected_summary._pub\n assert summary._sub == expected_summary._sub\n assert summary._send == expected_summary._send\n assert summary._receive == expected_summary._receive", "def test_verify_hostname(self):\n verify_certificate_hostname(X509_DNS_ONLY, u\"twistedmatrix.com\")", "def test_execute_monitoring_schedule_vendor_v3(self):\n pass", "async def test_device_tracker_hostname_and_macaddress_after_start_hostname_missing(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0", "def TEST_fetch_host_info( hostname ):\n if os.path.exists(\"/tmp/test-poll-host-pubkey.pub\"):\n with open(\"/tmp/test-poll-host-pubkey.pub\", \"r\") as f:\n pubk = f.read()\n host_info = {}\n host_info['hostname'] = hostname\n host_info['public_key'] = pubk.strip()\n return host_info \n\n raise Exception(\"Missing /tmp/test-poll-host-pubkey.pub\")", "def test_host_header_as_ip6(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: [20:11:abb::1]:80\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def test_host_header_no_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:80/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"\n ],\n )\n 
self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)", "def test_post_monitoring_schedule_vendor_v3(self):\n pass", "def test_client_home():\n time.sleep(2.0) # prevent healthcheck + home == double tap home()\n c.home()", "def test_sanitized_hostname(self):\n value = \" ../ ../some/dubious/hostname \"\n response = clean.hostname(value)\n assert response == \"somedubioushostname\"", "def test_uptimerobot_monitor_up(self) -> None:\n expected_topic = \"Mail Server\"\n expected_message = \"\"\"\nMail Server (server2.example.com) is back UP (Host Is Reachable).\nIt was down for 44 minutes and 37 seconds.\n\"\"\".strip()\n self.check_webhook(\"uptimerobot_monitor_up\", expected_topic, expected_message)", "def test_h2_host_header_as_ip(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\"host\", \"127.0.0.1\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def test_attributes(self):\n self.assertEqual(self.client.host, self.test_host)\n self.assertEqual(self.client.auth.host, self.test_host)", "async def test_api_host_info(\n hassio_handler, aioclient_mock: AiohttpClientMocker\n) -> None:\n aioclient_mock.get(\n \"http://127.0.0.1/host/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"chassis\": \"vm\",\n \"operating_system\": \"Debian GNU/Linux 10 (buster)\",\n \"kernel\": \"4.19.0-6-amd64\",\n },\n },\n )\n\n data = await hassio_handler.get_host_info()\n assert aioclient_mock.call_count == 1\n assert data[\"chassis\"] == \"vm\"\n assert data[\"kernel\"] == \"4.19.0-6-amd64\"\n assert data[\"operating_system\"] == \"Debian GNU/Linux 10 (buster)\"", "def test_start_vms(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def __get_host(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_HOST', 'localhost')", "def horizonhost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['horizon']\n env.exists = exists", "async def execute_host(self):\n return True", "async def test_device_tracker_registered_hostname_none(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerRegisteredWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n async_dispatcher_send(\n hass,\n CONNECTED_DEVICE_REGISTERED,\n {\"ip\": \"192.168.210.56\", \"mac\": \"b8b7f16db533\", \"host_name\": None},\n )\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()", "def test_heartbeat(self):\n pass", "def test_health_get(self):\n pass", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_host_header_mismath_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def test_getfriendlyname(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = b'soapaction: \"urn:Belkin:service:basicevent:1#GetFriendlyName\"'\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert 
resp.status_code == 200\n\n root = ET.fromstring(resp.text)\n assert root.find(\".//FriendlyName\").text == \"fake switch one\"" ]
[ "0.72601837", "0.6868456", "0.6761789", "0.6384901", "0.63416314", "0.6243301", "0.6243301", "0.6148776", "0.6148776", "0.6120381", "0.6119962", "0.6033267", "0.60227525", "0.6017332", "0.60004425", "0.5981932", "0.5963929", "0.5948897", "0.5937301", "0.5936245", "0.5922357", "0.59136134", "0.5912397", "0.5911781", "0.5907149", "0.5906329", "0.5906329", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5876867", "0.5867021", "0.58560354", "0.58504236", "0.584563", "0.5829208", "0.5824119", "0.5811158", "0.5789279", "0.5770431", "0.5770431", "0.57654357", "0.57380575", "0.5736455", "0.5731677", "0.57165515", "0.5712577", "0.5701007", "0.5685095", "0.56850904", "0.5678729", "0.5678729", "0.56584996", "0.56576854", "0.5657629", "0.5655401", "0.56460637", "0.5635803", "0.56115025", "0.5598623", "0.5577242", "0.5571462", "0.5559727", "0.55527174", "0.5549823", "0.5540819", "0.5537829", "0.5523272", "0.5521999", "0.5521047", "0.55199903", "0.55181843", "0.5517372", "0.55097437", "0.55063844", "0.5505918", "0.54974097", "0.549693", "0.5496564", "0.5490957", "0.54864705", "0.5482726", "0.5480428", "0.5472762", "0.54602027", "0.5456632", "0.54548156", "0.5449112" ]
0.6309198
5
Optional arguments for firmware upgrade test
def set_fw_args(args):
    os.environ["TEST_FW_OPT_ARGS"] = args.firmware_opt_args
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def null_upgrade_step(setup_tool):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def check_arguments(self):\n ## only four test operation is permitted, if given anything apart from this, then it should print error message\n if (self.args.snap is False and self.args.snapcheck is False and self.args.check is False and self.args.diff is False and self.args.version is False):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n\n if(((self.args.snap is True and (self.args.pre_snapfile is None or self.args.file is None)) or\n (self.args.snapcheck is True and self.args.file is None) or\n (self.args.check is True and self.args.file is None)) and \n (self.args.testfiles is None or self.args.hostname is None)\n ):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n if self.args.diff is True:\n if (self.args.pre_snapfile is not None and os.path.isfile(self.args.pre_snapfile)) and (\n self.args.post_snapfile is not None and os.path.isfile(self.args.post_snapfile)):\n comp = Comparator()\n comp.compare_diff(\n self.args.pre_snapfile,\n self.args.post_snapfile,\n None)\n sys.exit(1)\n else:\n if (self.args.file is None) and (\n self.args.testfiles is None or self.args.hostname is None):\n self.parser.print_help()\n sys.exit(1)", "def full_args():\n return setup_args()", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def test_firmware_version(self):\n self._verify_firmware_version()", "def test_uparforvarg(self):", "def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):\n raise NotImplementedError", "def check_backtester_args(parser: ArgumentParser, args: Namespace) -> None:\n check_ignoreodds_arg(parser, args)", "def pre_upgrade(self, upgrade_specs):\n pass", "def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. 
Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_usage_02(self):\n\n usage = self.sqlbak([\"--foobaz\"])\n\n assert \"usage: sqlbak directory\" in usage\n assert \"[--tarname=/path/to/tar]\" in usage\n assert \"[--ms-towait=ms]\" in usage\n assert \"[--dry-run]\" in usage\n assert \"[--integrity-check]\" in usage\n assert \"[--follow-links]\" in usage\n assert \"[--verbose]\" in usage\n assert \"[--version]\" in usage", "def test_main_named_args():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.main(argv=['-r', 'baz.hex'])\n mock_flash.assert_called_once_with(path_to_python=None,\n paths_to_microbits=[],\n path_to_runtime='baz.hex',\n minify=False,\n keepname=False)", "def _set_version(args: Any):\n if args['msc']:\n version = 'msc'\n elif args['nx']:\n version = 'nx'\n elif args['optistruct']:\n version = 'optistruct'\n elif args['nasa95']:\n version = 'nasa95'\n elif args['mystran']:\n version = 'mystran'\n else:\n version = None\n args['version'] = version\n del args['msc'], args['nx'], args['nasa95'], args['mystran'], args['optistruct']", "def test_update_with_support(update_command, first_app, second_app):\n # Configure no command line options\n options = update_command.parse_options([\"--update-support\"])\n\n update_command(**options)\n\n # The right sequence of things will be done\n assert update_command.actions == [\n # Host OS is verified\n (\"verify-host\",),\n # Tools are verified\n (\"verify-tools\",),\n # App configs have been finalized\n (\"finalize-app-config\", \"first\"),\n (\"finalize-app-config\", \"second\"),\n # Update the first app\n (\"verify-app-template\", \"first\"),\n (\"verify-app-tools\", \"first\"),\n (\"code\", \"first\", False),\n (\"cleanup-support\", \"first\"),\n (\"support\", \"first\"),\n (\"cleanup\", \"first\"),\n # Update the second app\n (\"verify-app-template\", \"second\"),\n (\"verify-app-tools\", \"second\"),\n (\"code\", \"second\", False),\n (\"cleanup-support\", \"second\"),\n (\"support\", \"second\"),\n (\"cleanup\", \"second\"),\n ]", "def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args", "def test_main_two_args():\n with mock.patch('uflash.flash', return_value=None) as mock_flash:\n uflash.main(argv=['foo.py', '/media/foo/bar'])\n mock_flash.assert_called_once_with(\n path_to_python='foo.py',\n paths_to_microbits=['/media/foo/bar'],\n path_to_runtime=None,\n minify=False,\n keepname=False)", "def main():\n parser = argparse.ArgumentParser()\n register_device_args(parser)\n register_update_args(parser, default_os_check='update', default_pave=False)\n args = parser.parse_args()\n update(args.system_image_dir, args.os_check, args.target_id,\n args.serial_num, args.pave)", "def requirements():\n print('Verifying basic requirements met')\n # python version 3+ is required\n if sys.version_info[0] < 3:\n print('This program requires Python 3')\n print('Exiting')\n exit(1)\n # you must provide a device list or device file\n if device_file == \"\" and devices == [\"\"]:\n print('You need to either specify the devices (-de) or specify a file with a list of devices one per 
line (-df)')\n print('No upgrades were performed')\n sys.exit(1)\n if device_file != \"\" and devices != [\"\"]:\n print('You need to either specify the devices (-de) or specify a file with a list of devices one per line (-df)')\n print('No upgrades were performed')\n sys.exit(1)\n if not partition:\n print('You need to specify a partition (-pa) for upgrade')\n sys.exit(1)\n if not upgrade_file:\n print('You must specify a local file to use for upgrade')\n sys.exit(1)", "def test_create_hyperflex_server_firmware_version(self):\n pass", "def test_badNumberOfArgumentsToChangeVersionsScript(self):\n versionChanger = ChangeVersionsScript()\n self.assertRaises(SystemExit, versionChanger.main, [])", "def install_args(f):\n args = [\n argh.arg('--clean-db', help=CLEAN_DB_HELP_MSG),\n argh.arg('--private-ip', help=PRIVATE_IP_HELP_MSG),\n argh.arg('--public-ip', help=PUBLIC_IP_HELP_MSG),\n argh.arg('-a', '--admin-password', help=ADMIN_PASSWORD_HELP_MSG)\n ]\n for arg in args:\n f = arg(f)\n return f", "def install_args(f):\n args = [\n argh.arg('--clean-db', help=CLEAN_DB_HELP_MSG),\n argh.arg('--private-ip', help=PRIVATE_IP_HELP_MSG),\n argh.arg('--public-ip', help=PUBLIC_IP_HELP_MSG),\n argh.arg('-a', '--admin-password', help=ADMIN_PASSWORD_HELP_MSG),\n ]\n for arg in args:\n f = arg(f)\n return f", "def test_upgrade_opt(self):\n with testing_utils.tempdir() as tmp:\n modfn = os.path.join(tmp, 'model')\n with open(modfn, 'w') as f:\n f.write('Test.')\n optfn = modfn + '.opt'\n base_opt = {\n 'model': 'tests.test_params:_ExampleUpgradeOptAgent',\n 'dict_file': modfn + '.dict',\n 'model_file': modfn,\n }\n with open(optfn, 'w') as f:\n json.dump(base_opt, f)\n\n pp = ParlaiParser(True, True)\n opt = pp.parse_args(['--model-file', modfn])\n agents.create_agent(opt)", "def test_main_first_arg_version(capsys):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--version'])\n\n stdout, stderr = capsys.readouterr()\n expected = uflash.get_version()\n # On python 2 --version prints to stderr. 
On python 3 to stdout.\n # https://bugs.python.org/issue18920\n assert (expected in stdout) or (expected in stderr)", "def outdated(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def test_upgrade_required_mock(self):\n with patch(\n \"aea.cli.upgrade.ItemUpgrader.check_upgrade_is_required\",\n return_value=\"100.0.0\",\n ):\n result = self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n catch_exceptions=False,\n )\n assert result.exit_code == 0", "def test_noArgs(self):\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([])\n\n self.assertEqual(\n e.exception.args, (\"Must specify one argument: the Twisted checkout\",)\n )", "def main():\n\n do_install, do_uninstall, notify_once = parse_args()\n if do_install:\n install()\n elif do_uninstall:\n uninstall()\n else:\n notify_outdated_formula(always_notify=not notify_once)", "def test_init_unsupported_version(self, monkeypatch, runway_config, runway_context):\n monkeypatch.setattr(MODULE + \".__version__\", \"1.3\")\n with pytest.raises(SystemExit) as excinfo:\n assert not Runway(runway_config, runway_context)\n assert excinfo.value.code == 1", "def register_update_args(arg_parser: argparse.ArgumentParser,\n default_os_check: Optional[str] = 'check',\n default_pave: Optional[bool] = True) -> None:\n serve_args = arg_parser.add_argument_group('update',\n 'device updating arguments')\n serve_args.add_argument('--system-image-dir',\n help='Specify the directory that contains the '\n 'Fuchsia image used to pave the device. Only '\n 'needs to be specified if \"os_check\" is not '\n '\"ignore\".')\n serve_args.add_argument('--serial-num',\n default=os.environ.get('FUCHSIA_FASTBOOT_SERNUM'),\n help='Serial number of the device. Should be '\n 'specified for devices that do not have an image '\n 'flashed.')\n serve_args.add_argument('--os-check',\n choices=['check', 'update', 'ignore'],\n default=default_os_check,\n help='Sets the OS version enforcement policy. If '\n '\"check\", then the deployment process will halt '\n 'if the target\\'s version does not match. If '\n '\"update\", then the target device will '\n 'be reflashed. If \"ignore\", then the OS version '\n 'will not be checked.')\n serve_args.add_argument('--pave',\n action='store_true',\n help='Performs a pave instead of a flash. '\n 'Device must already be in Zedboot')\n serve_args.add_argument('--no-pave',\n action='store_false',\n dest='pave',\n help='Performs a flash instead of a pave '\n '(experimental).')\n serve_args.set_defaults(pave=default_pave)", "def prepareUninstall():\n pass", "def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. 
d--0001')", "def add_optional_arguments(self, *args):\n self._add_sample_specific_arguments(False, *args)", "def test_nothing_to_upgrade(self, mock_click_echo):\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Updating AEA version specifier from ==0.1.0 to {compute_specifier_from_version(get_current_aea_version())}.\"\n )\n\n # test 'aea_version' of agent configuration is upgraded\n expected_aea_version_specifier = compute_specifier_from_version(\n get_current_aea_version()\n )\n agent_config = self.load_agent_config(self.current_agent_context)\n assert agent_config.aea_version == expected_aea_version_specifier\n assert agent_config.author == self.author\n assert agent_config.version == DEFAULT_VERSION", "def this_needs_work_test_hook_upgrade(self):\n self.do_test_hook_install(testee.upgrade_setup, True)", "def add_arguments(parser):\n parser.add_argument('-e', '--environment', help='Environment name', required=True)\n parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')\n parser.add_argument('-l', '--version-label', help='Version label', required=False)", "def add_args(cls, parser: argparse.ArgumentParser ):\n try:\n parser.add_argument('--wallet.name',required=False, default=bittensor.defaults.wallet.name, help='''The name of the wallet to unlock for running bittensor''')\n parser.add_argument('--wallet.hotkey', required=False, default=bittensor.defaults.wallet.hotkey, help='''The name of wallet's hotkey.''')\n parser.add_argument('--wallet.path',required=False, default=bittensor.defaults.wallet.path, help='''The path to your bittensor wallets''')\n except argparse.ArgumentError:\n # re-parsing arguments.\n pass", "def test_args(self):\n parser = argparse.ArgumentParser(\n prog=\"sysbottle\", description=\"sysbottle is parsed\"\n )\n subparsers = parser.add_subparsers()\n sysbottle.build(subparsers)\n args = parser.parse_args(\n [\n \"sysbottle\",\n \"abc.txt\",\n \"-c\",\n \"90\",\n \"-q\",\n \"1\",\n \"-d\",\n \"sda\",\n \"-i\",\n \"5\",\n \"-t\",\n \"3\",\n ]\n )\n self.assertTrue(hasattr(args, \"file\"))\n self.assertTrue(hasattr(args, \"cpu\"))\n self.assertTrue(hasattr(args, \"diskQ\"))\n self.assertTrue(hasattr(args, \"disks\"))\n self.assertTrue(hasattr(args, \"iowait\"))\n self.assertTrue(hasattr(args, \"throughput\"))", "def test_command_verify():\n wozardry.parse_args([\"verify\", kValid1])\n wozardry.parse_args([\"verify\", kValid2])", "def test_py2hex_runtime_arg():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.py2hex(argv=['tests/example.py', '-r', 'tests/fake.hex'])\n mock_flash.assert_called_once_with(path_to_python='tests/example.py',\n path_to_runtime='tests/fake.hex',\n paths_to_microbits=['tests'],\n minify=False,\n keepname=True)", "def test_update_system(self):\n pass", "def test_arg_version(run_nait) -> None: # type: ignore\n expected = nanaimo.version.__version__\n assert run_nait(['--version']).stdout.decode('utf-8').startswith(expected)", "def test_usage_01(self):\n\n usage = self.sqlbak([])\n\n assert \"usage: sqlbak directory\" in usage\n assert \"[--tarname=/path/to/tar]\" in usage\n assert \"[--ms-towait=ms]\" in usage\n assert \"[--dry-run]\" in usage\n assert \"[--integrity-check]\" in usage\n assert \"[--follow-links]\" in usage\n assert \"[--verbose]\" in usage\n assert \"[--version]\" in usage", "def test_wait_for_upgrade(self):\n 
self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def test_main_no_args():\n with mock.patch('sys.argv', ['uflash', ]):\n with mock.patch('uflash.flash') as mock_flash:\n uflash.main()\n mock_flash.assert_called_once_with(path_to_python=None,\n paths_to_microbits=[],\n path_to_runtime=None,\n minify=False,\n keepname=False)", "def downgrade():\n pass", "def downgrade():\n pass", "def test_update_no_version_change(\n dbbackup, update, version_file=None, orig_version=None\n):\n version_file = cli.version_file()\n open(version_file, \"w\").write(orig_version)\n cli.initialize()\n update.assert_not_called()\n dbbackup.assert_not_called()", "def test_options(tmpdir):\n ubuild = \"\"\"\ndef main(build):\n print(\"main\")\n print(build.options.args)\n\ndef test(build):\n print(\"test\")\n print(build.options.args)\n\"\"\".strip()\n\n # we need to create a virtualenv\n tmpdir.join(\"ubuild.py\").write(ubuild)\n _, out, err = execute_script(\n \"uranium_standalone\",\n \"--uranium-dir\",\n URANIUM_SOURCE_ROOT,\n \"main\",\n \"foo\",\n cwd=tmpdir.strpath,\n )\n\n assert \"main\" in out.decode(\"UTF-8\")\n assert \"foo\" in out.decode(\"UTF-8\")\n\n _, out, err = execute_script(\n \"uranium_standalone\",\n \"--uranium-dir\",\n URANIUM_SOURCE_ROOT,\n \"main\",\n \"foo\",\n cwd=tmpdir.strpath,\n )\n\n assert \"test\" in out.decode(\"UTF-8\")\n assert \"foo\" in out.decode(\"UTF-8\")", "def _check_args(self):\n if not self.use_binaries + self.use_installer + self.use_neurodebian:\n raise ValueError(\"Please specify installation method.\")\n if self.use_binaries + self.use_installer + self.use_neurodebian > 1:\n raise ValueError(\"More than one installation method specified.\")\n if self.use_installer and self.pkg_manager != 'yum':\n raise ValueError(\"FSL's Python installer does not work on \"\n \"Debian-based systems.\")\n if self.use_neurodebian and self.os_codename is None:\n raise ValueError(\"`os_codename` must be defined to install FSL \"\n \"through NeuroDebian.\")\n return True", "def test_invalidargs(clickrunner):\n for args in maincli.invalid_args:\n result = clickrunner.invoke(maincli.entrypoint, args)\n assert result.exit_code == 2\n assert \"no such option\" in result.output", "def upgrade(self) -> Optional[pulumi.Input['UpgradeNoteArgs']]:\n return pulumi.get(self, \"upgrade\")", "def _package_upgrades(args, env_attrs):\n\n overrides = env_attrs.get('override_attributes')\n if overrides.get('osops'):\n osops = overrides['osops']\n else:\n osops = overrides['osops'] = {}\n\n if args.get('disable_pkg_upgrades') is True:\n osops['do_package_upgrades'] = False\n else:\n osops['do_package_upgrades'] = True\n return env_attrs", "def main(args):", "def main(args):", "def handle_arguments(self, args):\n debug(\"BloomGenerator.handle_arguments: got args -> \" + str(args))", "def test_vargs(self):", "def test_py2hex_one_arg():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.py2hex(argv=['tests/example.py'])\n mock_flash.assert_called_once_with(path_to_python='tests/example.py',\n path_to_runtime=None,\n paths_to_microbits=['tests'],\n minify=False,\n keepname=True)", "def test_upgrade_to_non_registered(self):\n with pytest.raises(\n ClickException,\n match=r\".* with id .* is not registered. Please use the `add` command. 
Aborting...\",\n ):\n self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n \"nonexits/dummy:0.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def main(args=None):", "def main(args=None):", "def test_update9(self):\n pass", "def setBootargs(self):\n\t\tif self.testType == 'auto' or self.testType == 'manual':\n\t\t\tself.bootargs = self.settings.getKeyValue('nfs.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<nfsroot>', self.nfsroot)\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\n\t\telse:\n\t\t\tself.bootargs = self.settings.getKeyValue('ramdisk.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\t\t\n\t\treturn None", "def apply_args(self):\n\n args = self.args\n\n Test.compile_only = args.compile_only\n Test.skip_comparison = args.skip_comparison\n Test.global_tolerance = args.tolerance\n Test.global_abs_tolerance = args.abs_tolerance\n Test.global_particle_tolerance = args.particle_tolerance\n Test.performance_params = args.check_performance", "def createVersionForSetup(self, *arg, **properties):\n# result = DrawingImporter.createVersionForSetup(properties[\"show\"], properties[\"sequence\"], properties[\"beat\"], properties[\"setup\"]);\n# return result\n return True", "def main(args: argparse.Namespace) -> None:\n if args.is_rc and args.is_dev:\n raise ValueError(\"A release version cannot be both RC and dev.\")\n if args.is_rc:\n assert args.rc is not None, \"rc field must be specified if is_rc is specified\"\n assert args.rc >= 1, \"RC version must start from 1.\"\n else:\n assert args.rc is None, \"is_rc must be specified in order to specify rc field\"\n update_cmake(args.major, args.minor, args.patch)\n update_pypkg(\n args.major,\n args.minor,\n args.patch,\n is_rc=args.is_rc,\n is_dev=args.is_dev,\n rc_ver=args.rc,\n )", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def install_or_upgrade():\n global action_url\n global cgi_executable_path\n global documentation_path\n global full_python_path\n global private_data_directory\n if get_script_mode() == \"install\":\n output(\"\\nInstalling...\")\n elif get_script_mode == \"upgrade\":\n output(\"\\nUpgrading...\")\n if get_script_mode() == \"install\":\n os.system(\"rm -rf \" + private_data_directory)\n os.system(\"mkdir -p \" + private_data_directory)\n os.system(\"rm -rf \" + private_data_directory)\n os.system(\"cp -r data \" + private_data_directory)\n os.system(\"chmod 755 \" + private_data_directory + \"/*\")\n os.system(\"chmod 1777 \" + private_data_directory)\n\tsubstitute(private_data_directory + \"/footer.html\", \"ACTION_URL\", \\\n\t action_url)\n elif get_script_mode() == \"upgrade\":\n\tpass\n os.system(\"rm -rf \" + cgi_executable_path)\n os.system(\"mkdir -p \" + cgi_executable_path)\n os.system(\"rm -rf \" + cgi_executable_path)\n os.system(\"cp bin/mobile \" + cgi_executable_path)\n substitute(cgi_executable_path, \"DOCUMENT_ROOT_PATH\", \\\n private_data_directory)\n substitute(cgi_executable_path, \"FULL_ACTION_URL\", action_url)\n substitute(cgi_executable_path, \"FULL_PYTHON_PATH\", full_python_path)\n os.system(\"chmod 0755 \" + cgi_executable_path)\n if get_script_mode() == \"install\":\n substitute(\"doc/README\", \"<the private Mobile Web Proxy data directory>\", \\\n private_data_directory)\n 
os.system(\"mkdir -p \" + documentation_path)\n os.system(\"rm -rf \" + documentation_path)\n os.system(\"mkdir -p \" + documentation_path)\n os.system(\"cd doc; cp -rp * \" + documentation_path)\n if get_script_mode() == \"install\":\n output(\\\n\"\"\"\n\nInstallation is complete. Further information about the Mobile Web Proxy is\navailable in \"\"\" + documentation_path + \"\"\"/README.\n\nThank you for using the Mobile Web Proxy!\n\n\"\"\")\n if get_script_mode() == \"upgrade\":\n output(\\\n\"\"\"\n\nThe upgrade is complete. Further information about the Mobile Web Proxy is\navailable in \"\"\" + documentation_path + \"\"\"/README file.\n\nThank you for using the Mobile Web Proxy!\n\n\"\"\")", "def test_with_defaults():\n runner = CliRunner()\n result = runner.invoke(main, [\"fix_me\"])\n assert result.exit_code == 0\n assert not result.exception", "def __validate_upgrade_type(self):\n if self.upgrade_type not in self.upgrade_function.keys():\n self.fail(\"Unsupported upgrade_type: %s\" % self.upgrade_type)", "def test_main_first_arg_python():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.main(argv=['foo.py'])\n mock_flash.assert_called_once_with(path_to_python='foo.py',\n paths_to_microbits=[],\n path_to_runtime=None,\n minify=False,\n keepname=False)", "def test_main_wrong_args(self):\n wrong_args_list = [\n [],\n [\"--layout\", self.layout_single_signed_path],\n [\"--key\", self.alice_path],\n ]\n\n for wrong_args in wrong_args_list:\n self.assert_cli_sys_exit(wrong_args, 2)", "def test_install(self):\n pass", "def test_update(update_command, first_app, second_app):\n # Configure no command line options\n options = update_command.parse_options([])\n\n update_command(**options)\n\n # The right sequence of things will be done\n assert update_command.actions == [\n # Host OS is verified\n (\"verify-host\",),\n # Tools are verified\n (\"verify-tools\",),\n # App configs have been finalized\n (\"finalize-app-config\", \"first\"),\n (\"finalize-app-config\", \"second\"),\n # Update the first app\n (\"verify-app-template\", \"first\"),\n (\"verify-app-tools\", \"first\"),\n (\"code\", \"first\", False),\n (\"cleanup\", \"first\"),\n # Update the second app\n (\"verify-app-template\", \"second\"),\n (\"verify-app-tools\", \"second\"),\n (\"code\", \"second\", False),\n (\"cleanup\", \"second\"),\n ]", "def test_xfail_with_run_false_and_with_reason():\n pass", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_upgrade_apply_from_previous(setup, platform, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def test_main_optional_args(self):\n args = [\"in_toto_keygen.py\"]\n password = \"123456\"\n with patch.object(sys, 'argv', args + [\"-p\", \"bob\"]), \\\n patch(\"getpass.getpass\", return_value=password), self.assertRaises(\n SystemExit):\n in_toto_keygen_main()\n with 
patch.object(sys, 'argv', args + [\"-p\", \"bob\", \"3072\"]), \\\n patch(\"getpass.getpass\", return_value=password), self.assertRaises(\n SystemExit):\n in_toto_keygen_main()", "def test_cell_update_missing_required_args(self):\n expected_responses = [\n '.*?^usage: craton cell-update',\n '.*?^craton cell-update: error:.*$',\n ]\n stdout, stderr = self.shell('cell-update')\n actual_output = stdout + stderr\n for r in expected_responses:\n self.assertThat(actual_output,\n matchers.MatchesRegex(r, self.re_options))", "def test_version_01(self):\n\n version = self.sqlbak([\"--version\"])\n self.assertTrue(\"sqlbak v\" in version)", "def init_func(args):\n try:\n print(\"[ + ] Updating modules!\\n\")\n subprocess.run([\"php\", \"bin/magento\", \"setup:upgrade\"])\n print(\"[ + ] Compiling!\\n\")\n subprocess.run([\"php\", \"bin/magento\", \"setup:di:compile\"])\n\n for arg in args:\n print(\"Deploy using\", arg)\n subprocess.run([\"php\", \"bin/magento\", \"setup:static-content:deploy\", \"-f\", arg])\n except FileNotFoundError:\n print(\"Wrong file or file path for php!\")", "def test_upgrade_apply_all_fine(setup, platform, skuba):\n\n setup_kubernetes_version(skuba)\n\n # node upgrade apply\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\n \"Node my-master-0 is up to date\"\n ) != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\n \"Node my-worker-0 is up to date\"\n ) != -1", "def test_uninstall(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def set_args(self, c_args):\n if c_args.host and c_args.host in config.config.HOSTS:\n # use a known host\n self.host = config.config.HOSTS.get(c_args.host)\n elif c_args.host:\n # use a given url\n self.host = c_args.host\n else:\n # default, use local host\n self.host = config.config.HOSTS.get('local')\n\n for pathname in c_args.test:\n self.tests += utils.jsonschemas.load_and_validate_test(pathname)\n\n self.check_result = not c_args.only_structure\n self.start_pos = int(c_args.one_based)\n\n self.version = c_args.version.replace('.', '')\n spec_versions = VERSIONS[self.version]\n if c_args.no_openapi:\n spec_path = ''\n else:\n if os.path.isfile(config.config.SPEC):\n logging.info('Using Beacon specification in %s', config.config.SPEC)\n spec_path = config.config.SPEC\n with open(spec_path) as stream:\n self.openapi = parse_spec(stream)\n else:\n logging.info('Downloading Beacon specification')\n try:\n spec_url = SPEC_URL.format(version=spec_versions['ga4gh'])\n spec_path = urllib.request.urlopen(spec_url).read()\n self.openapi = parse_spec(spec_path)\n except urllib.error.URLError:\n logging.warning('Could not download %s.'\n 'Will not validate against the OpenAPI Specification.',\n spec_url)\n spec_path = ''\n\n if spec_path:\n server = openapi_core.schema.servers.models.Server(self.host)\n self.openapi.servers.append(server)\n\n if c_args.no_json:\n self.use_json_schemas = False\n else:\n if os.path.isdir(config.config.SCHEMAS):\n logging.info('Using JSON schemas in %s', config.config.SCHEMAS)\n self.json_schemas['response'] = load_local_schema('response')\n self.json_schemas['query'] = load_local_schema('query')\n self.json_schemas['info'] = load_local_schema('info')\n else:\n logging.info('Downloading JSON schemas')\n\n def load_url(qtype):\n path = JSON_URL.format(querytype=qtype, version=spec_versions['CSCfi'])\n return 
json.loads(urllib.request.urlopen(path).read())\n\n self.json_schemas['response'] = load_url('response')\n self.json_schemas['query'] = load_url('query')\n self.json_schemas['info'] = load_url('info')\n logging.info('\\n')", "def test_get_short_version(self):\n pass", "def test_update_with_requirements(update_command, first_app, second_app):\n # Configure a requirements update\n options = update_command.parse_options([\"-r\"])\n\n update_command(**options)\n\n # The right sequence of things will be done\n assert update_command.actions == [\n # Host OS is verified\n (\"verify-host\",),\n # Tools are verified\n (\"verify-tools\",),\n # App configs have been finalized\n (\"finalize-app-config\", \"first\"),\n (\"finalize-app-config\", \"second\"),\n # Update the first app\n (\"verify-app-template\", \"first\"),\n (\"verify-app-tools\", \"first\"),\n (\"code\", \"first\", False),\n (\"requirements\", \"first\", False),\n (\"cleanup\", \"first\"),\n # Update the second app\n (\"verify-app-template\", \"second\"),\n (\"verify-app-tools\", \"second\"),\n (\"code\", \"second\", False),\n (\"requirements\", \"second\", False),\n (\"cleanup\", \"second\"),\n ]", "def test_update_hyperflex_hxdp_version(self):\n pass", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_fw_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'FW version' in result.output\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0", "def test_enforcement_mode_update_command_when_blank_arguments_provided(\n err_msg, args, mock_client\n):\n with pytest.raises(ValueError) as err:\n update_enforcement_mode_command(mock_client, args)\n assert str(err.value) == err_msg", "def test_py2hex_minify_arg():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.py2hex(argv=['tests/example.py', '-m'])\n mock_flash.assert_called_once_with(path_to_python='tests/example.py',\n path_to_runtime=None,\n paths_to_microbits=['tests'],\n minify=True,\n keepname=True)", "def do_the_best_things(args):\n print('got args', args)", "async def test_warn_upgrade_new_install(config: Config, time: Time):\n cache = DataCache(config, time)\n assert not cache.notifyForIgnoreUpgrades\n assert cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS)", "def requires_setup(step, setup_names):\r\n pass", "def test_twentythree_no_args():\n sys.argv = ['test']\n with pytest.raises(SystemExit):\n TwentyThree()", "def cmd_appe(args):" ]
[ "0.6306604", "0.6163344", "0.583488", "0.5804764", "0.57716423", "0.57552814", "0.57459766", "0.5694769", "0.5651118", "0.5639809", "0.55904186", "0.557991", "0.55499375", "0.5547003", "0.5539552", "0.55363226", "0.5525847", "0.55074257", "0.5484567", "0.5458392", "0.5432542", "0.5426635", "0.54240566", "0.54198194", "0.541484", "0.5414215", "0.538606", "0.537156", "0.53650457", "0.53643", "0.5357345", "0.5351801", "0.5341828", "0.53327173", "0.53301466", "0.5313886", "0.5312014", "0.5304052", "0.5297192", "0.5295143", "0.52904624", "0.52901816", "0.52829456", "0.52652246", "0.5264791", "0.5259534", "0.5250255", "0.5249895", "0.5248253", "0.5248253", "0.5243115", "0.5241949", "0.5239941", "0.52327317", "0.5215812", "0.5214265", "0.52132916", "0.52132916", "0.5210869", "0.5204976", "0.5203138", "0.5193156", "0.51845413", "0.51845413", "0.5184449", "0.5179827", "0.5178599", "0.51767975", "0.51726377", "0.51661575", "0.5159774", "0.51535624", "0.51511836", "0.51425546", "0.51317877", "0.51224285", "0.51174384", "0.5116584", "0.5113596", "0.50896126", "0.50896096", "0.50887036", "0.5087577", "0.5083976", "0.50823903", "0.5081929", "0.5075903", "0.5073842", "0.5065112", "0.5064391", "0.506306", "0.50601506", "0.5060044", "0.5057753", "0.5056764", "0.50551057", "0.50550574", "0.5053919", "0.5052743", "0.50526583" ]
0.6882471
0
Extracts the value between marker and index
def extract(self): # type: () -> str
    if self.end():
        return self._src[self._marker :]
    else:
        return self._src[self._marker : self._idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, position):\n return self.numbers[position[0]][position[1]]", "def __getitem__(self, index):\n return self.data[index[0] - 1][index[1] - 1]", "def get_value_at_index(self, index, cc):\n tl = cc.dsget(self.title)\n return (tl[index], None)", "def marker_at_position(self, position):\n\n return self._slice(position, (1,1))[0][0]", "def find_offset(self,value):\n return self.header.find_offset(value)", "def __getitem__(self,index):\n return self._data[index[0]][index[1]]", "def get_value_by_index(self, index):\n return self['value'][index]", "def __getitem__(self, index):\n return self.position[index]", "def __getitem__(self, k):\n start = k.start # deal with [:stop] slices\n if start is None:\n start = self.start\n stop = k.stop # deal with [start:] slices\n if stop is None:\n stop = self.stop\n\n annoID = self._get_anno_id(start)\n a = self.db.annodb[annoID] # get TranslationAnnot object\n s = a.sequence # corresponding nucleotide region\n\n return a[(start - s.start) / 3: (stop - s.start) / 3]", "def GetOffset(self, entry_index):\n return self._offsets[entry_index]", "def __getitem__(self, index):\n return self.values[index]", "def get_value(self, x, y, z):\n\t\treturn self.data[ self.xyz_to_offset(x,y,z) ]", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, index):\n return self._value_at(index)", "def get_substr(self, i, j):\n return self._val[(i-1):j]", "def __getitem__(self, index):\n return self.seq[index]", "def index(self, value, start=0, stop=-1):\n return self.elem.index(value, start, stop)", "def at(self):\n return self.data[self.end]", "def entry(self, i, j):\n return self.data[self.columns * (i - 1) + j - 1]", "def __getitem__(self, j):\n\t\treturn self._coords[j]", "def getelem(self,num):\n #return self.M.conf()['elements'][num]\n return self.lat[num]", "def get_value_at_index(self, index, cc):\n high = cc.dsget('high')\n low = cc.dsget('low')\n return (high[index], low[index])", "def get(self, index):\n\n return self.values[index]", "def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]", "def getValueAt(*args):\n return _osgAnimation.OutExpoFunction_getValueAt(*args)", "def getValueAt(*args):\n return _osgAnimation.OutBackFunction_getValueAt(*args)", "def __getitem__ (self, idx):\n return self.row(idx[0])[idx[1]]", "def pos(self):\n return self.info['value']", "def dd_start_value_map_nb(record, ts):\n return ts[record['start_idx'], record['col']]", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def getValueAt(*args):\n return _osgAnimation.InBackFunction_getValueAt(*args)", "def get_cell_value(self, index):\n x, y = index\n return self.grid[y][x]", "def __getitem__(self, index):\n return self.cellData[index]", "def __getitem__(self, i):\n return self.data[i]", "def get_location(self, idx):\n if self.locations:\n return self.locations[idx % len(self.locations)]\n else:\n return \"\"", "def __getitem__ ( self , index ):\n\t\treturn self . 
data [ index ]", "def process_index(index, intensity, interaction_symbol):\n return tuple(index.split(interaction_symbol))", "def get_start(i,v):\n return i-v[i]-1", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\n return self.data[index]", "def getValueAt(*args):\n return _osgAnimation.InOutBackFunction_getValueAt(*args)", "def getValueAt(*args):\n return _osgAnimation.InOutExpoFunction_getValueAt(*args)", "def __getitem__(self, index):\n return self._nums[index]", "def __getitem__(self, ind):\n if not isinstance(ind, (str, unicode)):\n raise TypeError('Supply a valid str for the index')\n if self.indices[0] == ind:\n return self.x\n if self.indices[1] == ind:\n return self.y\n if self.indices[2] == ind:\n return self.z\n else:\n raise ValueError('Not a defined index')", "def __getitem__(self, index):\n\t\treturn self.data[index]", "def get_item(array, index):\n row, column = index\n return array[row][column]", "def __getitem__(self,idx):\n return self.g[idx]", "def __getitem__(self, idx):\n return self._data[idx]", "def get_dna_value(self, index: int):\n return self.dna[index]", "def getValueAt(*args):\n return _osgAnimation.InExpoFunction_getValueAt(*args)", "def __findPlaceholder(self, data, index):\r\n m = self.__placeholder_re.search(data, index)\r\n if m:\r\n return m.group(1), m.end()\r\n else:\r\n return None, index + 1", "def xval(self, i):\n return self.x[i]", "def __getitem__(self, index):\n return self.points[index]", "def getValueAt(*args):\n return _osgAnimation.LinearFunction_getValueAt(*args)", "def __getitem__(self, k):\n return self._coords[k]", "def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1", "def _get_marker_indices(marker, line):\n indices = [i for i, ltr in enumerate(line) if ltr == marker]\n start = indices[0:-1:2]\n end = [i + 1 for i in indices[1::2]]\n assert len(start) == len(end)\n return start, end", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def _getindicator(self, index: int) -> int:\n bitmask = 1 << (index + 1)\n return self._get_buffer(0x04) & bitmask", "def __getitem__(self,i):\n\t\treturn self.series[i]", "def getValueAt(*args):\n return _osgAnimation.OutQuartFunction_getValueAt(*args)", "def __getitem__(self, index: Any) -> Any:\n return self.contents[index]", "def __getitem__(self, data):\n i,j = data\n return self._data[i][j]", "def __getitem__(self, label_value: int) -> 'SegmentInfo':\n return self.infos[label_value]", "def __getitem__(self, position):\n\n return self.data[position]", "def __getitem__(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n raise IndexError", "def find_value_at(list, indx):\n row = list[indx]\n val = row.find_all(\"td\")[1].string[:-1]\n return float(val.replace(',', '.'))", "def getValueAt(*args):\n return _osgAnimation.OutCircFunction_getValueAt(*args)", "def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]", "def value(self):\n return self.alignment.matching[self.idx]", "def _fetch_value(cursor, index=1):\n return cursor.fetchone().popitem()[index]", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n return output", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of 
range.')\n return node.value", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def LinearFunction_getValueAt(*args):\n return _osgAnimation.LinearFunction_getValueAt(*args)", "def OutExpoFunction_getValueAt(*args):\n return _osgAnimation.OutExpoFunction_getValueAt(*args)", "def __getitem__(self, i):\n return self.get(i, i + 1)", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def __getpos__(self, num):\n return self.num_to_pos[num]", "def _extract_from_arn(arn, position):\n\n return re.findall(\"(.*?):\", arn)[position]", "def __getitem__(self, i):\n return self._ar[i]", "def getValueAt(*args):\n return _osgAnimation.InQuartFunction_getValueAt(*args)", "def __getitem__(self, index):\n\n return self.user_item_coordinates[index, :], self.rating[index]", "def getValueAt(*args):\n return _osgAnimation.InCircFunction_getValueAt(*args)", "def getValueAt(*args):\n return _osgAnimation.InOutQuartFunction_getValueAt(*args)", "def get_index(self, index):\n return self.get_node_from_index(index).data", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __getitem__(self, rc):\r\n row, col = rc\r\n index = self.row_column_to_index(row, col)\r\n return self.values[index]", "def __getitem__(self, index):\n return self.array[index]", "def __getitem__(self, idx):\n return (self.sequence_data[idx], self.data_len[idx]), self.labels[idx]", "def OutBackFunction_getValueAt(*args):\n return _osgAnimation.OutBackFunction_getValueAt(*args)", "def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__\n return 0", "def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # retrieve the item directly from my container\n value = self.data[index]\n # all done\n return value", "def InExpoFunction_getValueAt(*args):\n return _osgAnimation.InExpoFunction_getValueAt(*args)", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n\n return output", "def get(self, index):\n count = 0\n x = self.begin\n\n while count != index:\n x = x.next\n count += 1\n\n return x.value" ]
[ "0.63515353", "0.63282895", "0.625605", "0.62464094", "0.62333435", "0.6199576", "0.6177496", "0.6158412", "0.6130857", "0.6084534", "0.606006", "0.6025226", "0.6003024", "0.6003024", "0.5981887", "0.59495384", "0.5900323", "0.58751357", "0.5841193", "0.58206975", "0.5820006", "0.5809314", "0.5808822", "0.57966477", "0.5774062", "0.57731545", "0.5748854", "0.57315874", "0.5730966", "0.5730651", "0.5730342", "0.5726519", "0.5720796", "0.57172424", "0.5710061", "0.57073694", "0.5697994", "0.5689452", "0.5678575", "0.5678575", "0.56747836", "0.5672436", "0.56695545", "0.56683326", "0.5667375", "0.5667052", "0.5663132", "0.5659187", "0.56590086", "0.56576604", "0.5654503", "0.5648295", "0.5646906", "0.5635594", "0.56269217", "0.56131065", "0.56058526", "0.5602694", "0.55939263", "0.55933666", "0.5590977", "0.5588011", "0.55847853", "0.5583747", "0.55802035", "0.55793375", "0.5579059", "0.5567937", "0.55556035", "0.55510384", "0.5545889", "0.5545889", "0.55448204", "0.55448204", "0.5542048", "0.55411506", "0.55396026", "0.5535873", "0.5535873", "0.5535873", "0.55350655", "0.55296856", "0.5526168", "0.5506674", "0.55055636", "0.5501235", "0.549773", "0.54956317", "0.54954106", "0.5493391", "0.5493391", "0.54772705", "0.5477181", "0.54765284", "0.5474157", "0.54682827", "0.54644537", "0.54628575", "0.54628384", "0.5460981" ]
0.6038684
11
Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance.
def inc(self): # type: () -> bool
    try:
        self._idx, self._current = next(self._chars)
        return True
    except StopIteration:
        self._idx = len(self._src)
        self._current = TOMLChar("\0")
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def done_parsing(self):\n # STUDENT\n return (self.input_buffer_len() == 1 ) and (self.stack_len()==1) \n # END STUDENT", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def next(self):\r\n\t\tself.index += 1\r\n\t\treturn not self.eof()", "def end_of_input():\n return at_end.bind(lambda end:\n Parser(lambda chunk, last: ParserResult.from_error(\"Not end of input\"))\n if not end else Parser.unit(None))", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def end(self): # type: () -> bool\n return self._idx >= len(self._src) or self._current == \"\\0\"", "def eof(self):\r\n\t\treturn self.index == len(self.data)", "def hasNext(self):\n return bool(self.peek())", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def has_finished(self) -> bool:\n return self.pos >= len(self.tokens)", "def hasNext(self) -> bool:\n if self.stack: return True\n else: return False", "def __advance(self, n=1):\r\n for i in range(n):\r\n if self.__tokenizer.has_more_tokens():\r\n self.__tokenizer.advance()\r\n continue\r\n return False\r\n return True", "def has_next(self):\n return not self.finished_function(self.peek)", "def hasNext(self) -> bool:\n return self.stack != []", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def is_eof(self) -> bool:\n ...", "def hasNext(self) -> bool:\n\t\treturn bool(self.stack)", "def next(self):\n if self.done():\n return False\n opcodeArrayRef = opcode.opcodeArray\n\n op = opcodeArrayRef[self.script[self.offset]]\n if op.length == 1:\n # No additional data. 
Note that some of the opcodes, notably OP_1NEGATE,\n # OP_0, and OP_[1-16] represent the data themselves.\n self.offset += 1\n self.op = op\n self.d = ByteArray(b\"\")\n return True\n elif op.length > 1:\n # Data pushes of specific lengths -- OP_DATA_[1-75].\n script = self.script[self.offset :]\n if len(script) < op.length:\n self.err = DecredError(\n \"opcode %s requires %d bytes, but script only has %d remaining\"\n % (op.name, op.length, len(script))\n )\n return False\n\n # Move the offset forward and set the opcode and data accordingly.\n self.offset += op.length\n self.op = op\n self.d = script[1 : op.length]\n return True\n elif op.length < 0:\n # Data pushes with parsed lengths -- OP_PUSHDATA{1,2,4}.\n script = self.script[self.offset + 1 :]\n if len(script) < -op.length:\n self.err = DecredError(\n \"opcode %s requires %d bytes, but script only has %d remaining\"\n % (op.name, -op.length, len(script))\n )\n return False\n\n # Next -length bytes are little endian length of data.\n if op.length == -1:\n dataLen = script[0]\n elif op.length == -2:\n dataLen = script[:2].unLittle().int()\n elif op.length == -4:\n dataLen = script[:4].unLittle().int()\n else:\n self.err = DecredError(\"invalid opcode length %d\" % op.length)\n return False\n\n # Move to the beginning of the data.\n script = script[-op.length :]\n\n # Disallow entries that do not fit script or were sign extended.\n if dataLen > len(script) or dataLen < 0:\n self.err = DecredError(\n \"opcode %s pushes %d bytes, but script only has %d remaining\"\n % (op.name, dataLen, len(script))\n )\n return False\n\n # Move the offset forward and set the opcode and data accordingly.\n self.offset += 1 - op.length + dataLen\n self.op = op\n self.d = script[:dataLen]\n return True\n\n # The only remaining case is an opcode with length zero which is\n # impossible.\n raise AssertionError(\"unreachable\")", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def hasNext(self):\n return True if self.stack else False", "def hasNext(self) -> bool:\n return self.stack or self.node", "def hasNext(self) -> bool:\n return len(self.stack) > 0", "def hasNext(self) -> bool:\n return len(self.stack) > 0", "def hasNext(self) -> bool:\n return len(self.stack) > 0", "def _is_eof(self, symbol):\n if symbol.type == self.scanner.EOF:\n return True\n else:\n return False", "def advance(self):\n self.__token = \"\"\n if self.__i >= len(self.__lines):\n return\n while self.__i < len(self.__lines) and self.__lines[self.__i] in JackTokenizer.redundant: # advance as long as you see redundant chars\n self.__i += 1\n\n if self.__i >= len(self.__lines):\n return\n\n if self.__lines[self.__i] == \"\\\"\":\n self.update()\n while self.__lines[self.__i] != \"\\\"\": # str const\n self.update()\n self.update()\n return\n\n if self.__lines[self.__i].isdigit(): # int const\n while self.__lines[self.__i].isdigit():\n self.update()\n return\n\n if self.__i < (len(self.__lines) - 1) and self.__lines[self.__i:self.__i + 2] == \"//\": # comment\n while self.__i < len(self.__lines) and self.__lines[self.__i] != \"\\n\":\n self.__i += 1\n self.advance()\n return\n\n if self.__i < (len(self.__lines) - 1) and self.__lines[self.__i:self.__i + 2] == \"/*\": # comment\n self.__i += 1\n while self.__lines[self.__i:self.__i + 2] != \"*/\":\n self.__i += 1\n self.__i += 2\n self.advance()\n return\n\n if self.__i < (len(self.__lines) - 2) and self.__lines[self.__i:self.__i + 3] == \"/**\": # comment\n self.__i += 
2\n while self.__lines[self.__i:self.__i + 2] != \"*/\":\n self.__i += 1\n self.__i += 2\n self.advance()\n return\n\n if self.__lines[self.__i] in JackTokenizer.symbols: # symbol\n self.update()\n return\n\n else: # other cases\n while self.__lines[self.__i] not in JackTokenizer.symbols and self.__lines[self.__i] not in \" \\t\\r\\n\":\n self.update()", "def hasNext(self) -> bool:\n return self.pointer < len(self.ordered_nodes)", "def hasNext(self) -> bool:\n return self.idx < len(self.m) - 1", "def hasNext(self) -> bool:\n return self.idx < len(self.m) - 1", "def eos(self):\n return self.pos == len(self.string)", "def eol(self):\n return self.pos == len(self.tokens)", "def has_next(self):\n regf = self.first_hbin().parent()\n if regf.hbins_size() + regf.first_hbin_offset() == self._offset_next_hbin:\n return False\n\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def hasNext(self):\n return self.iter < len(self.nums)", "def has_next():", "def isEOF(self):\n return _libsbml.XMLToken_isEOF(self)", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)", "def hasNextInt(self) -> bool:\n raise NotImplementedError", "def eof(self):\n try:\n next_line = self.read_pkt_line()\n except HangupException:\n return True\n self.unread_pkt_line(next_line)\n return False", "def hasNext(self) -> bool:\n return self.stack", "def hasNext(self) -> bool:\n return self.block.hasNext()", "def has_next():\n\n return True", "def hasNext(self) -> bool:\r\n return len(self.comb) > self.ind", "def _is_at_end(self):\n return self.current >= len(self.source)", "def reached_end_of_stream(self):\n pass", "def at_eof(self):\n return self._eof and not self._buffer", "def hasNext(self) -> bool:\n ...", "def eof(self):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = pservlet.pipe_eof(self._pipe_desc)\n\t\tif result > 0: return True\n\t\telif result == 0: return False\n\t\traise PlumberExceptions.PlumberNativeException(\"Cannot finish the API call to pipe_eof\")", "def _advance(self, idlist=None):\n if self.token.id == \"END\":\n return\n if idlist and self.token.id in idlist:\n self.token = next(self.token_gen)\n elif not idlist:\n self.token = next(self.token_gen)\n else:\n raise ParseError(\n \"\"\"Expected one of %s found %r instead. 
(line: %i)\"\"\"\n % (\" \".join(idlist), self.token.id, self.line)\n )", "def hasNext(self) -> bool:\n raise NotImplementedError", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def has_next(self) -> bool:\n if not self._exhausted:\n iter(self)\n\n return bool(self._queue or self._has_next_page())", "def at_eof(self):\n return self.tell() == len(self)", "def __bool__(self):\n return self.end < len(self.data)", "def has_next(self):\n return self.count < len(self)", "def done(self):\n return self.err is not None or self.offset >= len(self.script)", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def _is_end(self, symbol):\n if symbol.id == self.scanner.END_ID:\n return True\n else:\n return False", "def hasNext(self) -> bool:\n return self.index + 1 < len(self.nodes_sorted)", "def advance(self):\n chars = ''\n tokenizer = LexerDfa()\n while self.pos < len(self.text):\n tokenizer.next_state(self.text[self.pos])\n if not tokenizer.finished:\n chars += self.text[self.pos]\n self.pos += 1\n else:\n self.current_token = Token(tokenizer.get_token_type(), chars.strip())\n return\n self.current_token = Token(EOF, None)", "def fullyConsumed(self):\n return len(self.__string) == self.__current_pos", "def next(self): # noqa A002\n return bool(self._ll_tree.next())", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def advance(self):\n if self.instr is not None:\n self.instr.opcode = self.instr.binary[25:]\n if opcode_decode[self.instr.opcode] == 'R-type':\n self.decode_rtype()\n elif opcode_decode[self.instr.opcode] == 'I-type' or opcode_decode[self.instr.opcode] == 'Load':\n self.decode_itype()\n else:\n raise SyntaxError(\"Invalid opcode\")", "def has_next(self) -> bool:\n return self._bin_iter.has_next()", "def next(self):\n if self.current and self.current.next:\n self.current = self.current.next\n return True\n return False", "def has_finished(self) -> bool:\n return self.pos >= len(self.s)", "def use_full_parser(text):\n end_of_header_match = _end_of_simple_header_pattern.search(text)\n return end_of_header_match is not None", "def eof(self):\n\t\treturn not self.is_alive() and self._queue.empty()", "def maybe_advance(self, expected_type):\n token = self._get_token()\n if token and token.type == expected_type:\n self.pos = token.pos\n return token.value\n return None", "def hasNext(self) -> bool:\n return True if len(self.inorder) > self.index else False", "def hasNext(self) -> bool:\n return self.elements != []", "def isEOF(self):\n return _libsbml.XMLInputStream_isEOF(self)", "def isFinished(self):\n\n currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)\n if currentValue == 0:\n return True\n\n # It can be more than one line for the previous alignment value.\n # We iterate until we find a better value or to the end of the lines.\n for i in self:\n while i.nextLine[self.idx] > currentValue and not i.isFinished:\n i.next();\n \n return not any(i.nextLine[self.idx] <= currentValue for i in self)", "def advance(self):\n self.pos += 1\n if self.pos > len(self.text) - 1:\n self.current_char = None # Indicates end of input\n else:\n self.current_char = 
self.text[self.pos]", "def hasNext(self, n=1):\n return bool(self.peek(n - 1))", "def true(self):\n val = self.read(4)\n if val != b'true':\n self.on_parser_error(\"true token expected\")\n return True", "def hasNext(self) -> bool:\n return len(self.list) != 0", "def hasNext(self):\n if self.tree:\n return True\n else:\n return False", "def do_EOF(self, arg):\n \treturn True", "def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)", "def at_end():\n def run(chunk, last):\n if chunk:\n return ParserResult.from_done(False, chunk, last)\n elif last:\n return ParserResult.from_done(True, chunk, last)\n else:\n return ParserResult.from_partial(Parser(run))\n return Parser(run)", "def next(self):\n self.pos += 1\n self.current_char = None if self.pos >= len(self.input) else self.input[self.pos]", "def has_next(self):\n if self.idx < len(self.nodes):\n return True\n else:\n return False", "def has_end(self):\n return bool(self._end)", "def parse(self, inp):\n\n tokens = self.tokenizer.tokenize(inp)\n tokens_left = len(tokens)\n\n # print(tokens)\n\n while tokens_left:\n\n for rule in self.grammar:\n tokens = tokens[rule.match(tokens):]\n\n if len(tokens) < tokens_left:\n tokens_left = len(tokens)\n else:\n # nothing is matching any more - stop\n break\n\n return len(tokens) == 0, tokens", "def parse_next_instruction(self) -> None:\n instruction = self.program[self.pointer]\n opcode = instruction % 100\n if opcode == 99:\n self.halt = True\n\n self.modes = instruction // 100\n\n if opcode == 1:\n self.op_sum()\n if opcode == 2:\n self.op_multiply()\n if opcode == 3:\n self.op_input()\n if opcode == 4:\n self.op_output()\n if opcode == 5:\n self.op_jump_if_true()\n if opcode == 6:\n self.op_jump_if_false()\n if opcode == 7:\n self.op_less_than()\n if opcode == 8:\n self.op_equal_to()\n if opcode == 9:\n self.op_adjust_relative()", "def has_more_commands(self):\n return not self.eof", "def eof(self):\n return not self.is_alive() and self._queue.empty()", "def has_next(self):\n return len(self.pile) > 0", "def iscomplete(self):\n if self.__start_line < 0:\n return False\n if self.__end_line < self.__start_line:\n return False\n return True", "def advance(self):\n line = self.stream.readline()\n while line is not None:\n # Strip comments or empty spaces\n line = re.sub('//.*', '', line).strip()\n\n # Avoid comments or empty lines\n if line != '':\n break\n\n line = self.stream.readline()\n\n if line is None:\n print \"No more commands.\"\n return\n\n self.current_command = line", "def hasNextBigInteger(self) -> bool:\n raise NotImplementedError", "def try_advance(self):\n if not self.step.toclick:\n self.step.finished = True\n return True\n return False", "def hasNextByte(self) -> bool:\n raise NotImplementedError", "def hasNextLine(self) -> bool:\n raise NotImplementedError", "def do_EOF(self, arg):\n return True" ]
[ "0.693617", "0.69233114", "0.69094247", "0.6850217", "0.6331117", "0.6285765", "0.620027", "0.61610067", "0.61490417", "0.6147039", "0.6147039", "0.6147039", "0.6147039", "0.6117384", "0.6101283", "0.6094416", "0.60582894", "0.60258627", "0.60254914", "0.6012133", "0.60018945", "0.59906393", "0.59468514", "0.5944689", "0.5934795", "0.5885448", "0.5885448", "0.5885448", "0.58846986", "0.58831316", "0.585524", "0.5853398", "0.5853398", "0.5793228", "0.578986", "0.57700825", "0.57608515", "0.5755913", "0.5752387", "0.574385", "0.5741151", "0.5733965", "0.5730419", "0.57269275", "0.57210803", "0.5702592", "0.5693606", "0.5658733", "0.5653617", "0.5651549", "0.5647885", "0.5645921", "0.5643281", "0.55808204", "0.5578173", "0.55627096", "0.5548155", "0.552837", "0.5524199", "0.55114", "0.5507179", "0.5494637", "0.5482841", "0.5467433", "0.5462119", "0.54548216", "0.5453051", "0.5442577", "0.5439151", "0.5415534", "0.5399512", "0.5393413", "0.53791654", "0.5378531", "0.53722066", "0.5357195", "0.5348179", "0.53412277", "0.5311001", "0.5305458", "0.53052485", "0.52991766", "0.5281908", "0.5279256", "0.5271198", "0.5270499", "0.5268999", "0.5254632", "0.5242956", "0.5242132", "0.5238217", "0.5232787", "0.5214152", "0.5213026", "0.52127945", "0.5210184", "0.5199454", "0.5196922", "0.5193735", "0.51821303" ]
0.64029455
4
Increments the parser by n characters if the end of the input has not been reached.
def inc_n(self, n): # type: (int) -> bool
    for _ in range(n):
        if not self.inc():
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __advance(self, n=1):\r\n for i in range(n):\r\n if self.__tokenizer.has_more_tokens():\r\n self.__tokenizer.advance()\r\n continue\r\n return False\r\n return True", "def cmd_n(self,s):\n length = 0\n node = self.start\n while node is not None:\n line = node.element\n length += len(line)\n if line.find(s):\n self.cursor = node\n self.delta = line.find(s)\n break\n node = node.next\n self.get_text()", "def advance(self):\n if self.has_more_commands():\n self.counter += 1", "def read_nchars(string, n=1):\n return string[:n]", "def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1", "def advance(self):\n self.pos += 1\n if self.pos > len(self.text) - 1:\n self.current_char = None # Indicates end of input\n else:\n self.current_char = self.text[self.pos]", "def peek(string, n=0):\n return string[:n]", "def increment(self, n: int = 1):\n return Cursor(self.data, self.begin, self.end+n)", "def look_ahead(self, n: int = 1):\n return self.data[self.end:self.end+n]", "def grow(self, len):\n ret = libxml2mod.xmlParserInputBufferGrow(self._o, len)\n return ret", "def next(self):\n self.pos += 1\n self.current_char = None if self.pos >= len(self.input) else self.input[self.pos]", "def parse_line(self, line: str) -> None:\n self._count += 1", "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def inc_size(self):\r\n self.__length += 1", "def advance(self):\n if self.has_more_commands():\n self.counter += 1\n # if self._is_label():\n # self.counter += 1", "def _next_char(self):\r\n\r\n if self._index >= len(self._input_string):\r\n return None\r\n\r\n ret = self._input_string[self._index]\r\n self._index += 1\r\n return ret", "def advance(self):\n chars = ''\n tokenizer = LexerDfa()\n while self.pos < len(self.text):\n tokenizer.next_state(self.text[self.pos])\n if not tokenizer.finished:\n chars += self.text[self.pos]\n self.pos += 1\n else:\n self.current_token = Token(tokenizer.get_token_type(), chars.strip())\n return\n self.current_token = Token(EOF, None)", "def advance_n(self,n):\n print(self)\n for i in range(n):\n self.advance_one()\n print(self)", "def enearn(self, n: int) -> int:\n result = self._read_inline(f\"enearn({n})\")\n return int(result)", "def eat(seq, n=None):\n if n is None:\n collections.deque(seq, maxlen=0)\n else:\n next(itertools.islice(seq, n, n), None)", "def read(self, n=1):\n return self.string[self.pos:self.pos + n]", "def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def nextNumberOfResults(self, N=10):\n self.start += self.N\n self.N = N", "def advance(self, n):\n return _elas.SwigPyIterator_advance(self, n)", "def incr(self, n=1):\n return _libsbml.SwigPyIterator_incr(self, n)", "def _advance(self, c=1):\n self._index += c", "def read(self, 
n):\n buffer = [] #Buffer for storing digits to output\n \n #While more digits needed (and limit not reached), add more digits\n while len(buffer) < n and self.digits_read < self.limit:\n #Get the next character\n char = self.file.read(1)\n #If out of characters, end search\n if char == '':\n self.file.close()\n self.file = None\n break\n #Only add numerical digits to the buffer\n if char.isdigit():\n buffer.append(int(char))\n self.digits_read += 1\n \n #Return digits\n return buffer", "def advance(self):\n self.pos += 1\n if self.pos > len(self.syntax) - 1:\n self.current_char = None\n else:\n self.current_char = self.syntax[self.pos]", "def inc(self): # type: () -> bool\n try:\n self._idx, self._current = next(self._chars)\n\n return True\n except StopIteration:\n self._idx = len(self._src)\n self._current = TOMLChar(\"\\0\")\n\n return False", "def _next_char(self):\n self.current_position += 1\n if self.current_position >= len(self.stream):\n self.current_char = \"\\0\"\n self.EOF = True\n else:\n self.current_char = self.stream[self.current_position]\n if self.current_char == \"\\n\":\n self.line_number += 1\n self.line_start_position = self.current_position", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def advance(self):\n self.pos += 1\n if self.pos < len(self.text):\n self.current_char = self.text[self.pos]\n else:\n self.current_char = None", "def wc(substring):\n n = 0\n try:\n while True:\n n += (yield)\n except GeneratorExit:\n print(substring, n, flush=True)", "def jumpahead(self, n):\n self.counter += n\n self.basehash.update(b'\\x00'*n)", "def read_count(f, n):\n buf = ''\n while len(buf) < n:\n nextchunk = f.read(n - len(buf))\n if not nextchunk:\n return ''\n buf += nextchunk\n return buf", "def _advance(self):\n self._current += 1", "def incr(self, n=1):\n return _osgAnimation.SwigPyIterator_incr(self, n)", "def skip(self, count):\n self.bytepos += count", "def next_n(self, n: int, fast_forward=False):\n raise NotImplementedError('Subclass must define the next_n method')", "def getcharsposix(n):\n\t\n\tfd = sys.stdin.fileno()\n\toldSettings = termios.tcgetattr(fd)\n\tstring = \"\"\n\ti = 0\n\t# Loop until we get N chars\n\twhile i <= n:\n\t\t# Do some magic\n\t\ttry:\n\t\t\ttty.setcbreak(fd)\n\t\t\tanswer = sys.stdin.read(1)\n\t\t\tif answer == b'\\x03':\n\t\t\t\traise KeyboardInterrupt()\n\t\t\ttry:\n\t\t\t\tstring += str(answer, ENCODING)\n\t\t\texcept UnicodeDecodeError:\n\t\t\t\tcontinue\n\t\tfinally:\n\t\t\ttermios.tcsetattr(fd, termios.TCSADRAIN, oldSettings)\n\t\t\ti += 1\n\t# Return string\n\treturn string", "def read(self, n=1):\n return 0", "def feed(self, token, test_newline=True):\n if test_newline:\n newlines = token.count(self.newline_char)\n if newlines:\n self.line += newlines\n self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1\n\n self.char_pos += len(token)\n self.column = self.char_pos - self.line_start_pos + 1", "def update(self):\n self.__token += self.__lines[self.__i]\n self.__i += 1", "def next_token(self):\n p = self.re_token.search(self.remain)\n if not p:\n return None\n # move forward.\n s = p.start()\n self.buffer.append(self.remain[:s].encode(string_escape))\n self.cur += s + len(p.group())\n\n return p", "def consume (self, n) :\r\n if (n<0 or 
n>len(self)) :\r\n m = \"Trying to consume more data than in Circ. Buff\"\r\n raise Exception(m)\r\n \r\n self.empty_ = (n==len(self))\r\n self.nextGet_ = (self.nextGet_+n) % self.capacity()", "def Advance(self, *, forward: bool = True, amount: int = 1, extend: bool = False):\n i = self.Index\n if forward: i += amount\n else: i -= amount\n\n if i > self.Count:\n if extend:\n for _ in range(amount): self.Append('')\n else: i = self.Count\n elif i < 0: i = 0\n\n self.Index = i", "def advance(self):\n self.__token = \"\"\n if self.__i >= len(self.__lines):\n return\n while self.__i < len(self.__lines) and self.__lines[self.__i] in JackTokenizer.redundant: # advance as long as you see redundant chars\n self.__i += 1\n\n if self.__i >= len(self.__lines):\n return\n\n if self.__lines[self.__i] == \"\\\"\":\n self.update()\n while self.__lines[self.__i] != \"\\\"\": # str const\n self.update()\n self.update()\n return\n\n if self.__lines[self.__i].isdigit(): # int const\n while self.__lines[self.__i].isdigit():\n self.update()\n return\n\n if self.__i < (len(self.__lines) - 1) and self.__lines[self.__i:self.__i + 2] == \"//\": # comment\n while self.__i < len(self.__lines) and self.__lines[self.__i] != \"\\n\":\n self.__i += 1\n self.advance()\n return\n\n if self.__i < (len(self.__lines) - 1) and self.__lines[self.__i:self.__i + 2] == \"/*\": # comment\n self.__i += 1\n while self.__lines[self.__i:self.__i + 2] != \"*/\":\n self.__i += 1\n self.__i += 2\n self.advance()\n return\n\n if self.__i < (len(self.__lines) - 2) and self.__lines[self.__i:self.__i + 3] == \"/**\": # comment\n self.__i += 2\n while self.__lines[self.__i:self.__i + 2] != \"*/\":\n self.__i += 1\n self.__i += 2\n self.advance()\n return\n\n if self.__lines[self.__i] in JackTokenizer.symbols: # symbol\n self.update()\n return\n\n else: # other cases\n while self.__lines[self.__i] not in JackTokenizer.symbols and self.__lines[self.__i] not in \" \\t\\r\\n\":\n self.update()", "def ctrl_f(self):\n if self.index < len(self.string):\n self.index += 1", "def incr(self, n=1):\n return _elas.SwigPyIterator_incr(self, n)", "def character_limit(self) -> None:\n if len(self.text) > 0:\n if len(self.text) == 1 and not self.input_validation(self.text[0]):\n self.text = ''\n else:\n if self.input_validation(self.text[-1]):\n self.text = self.text[-1]\n else:\n self.text = self.text[-2]\n return None", "def process(self):\n self.reader += 1", "def advance(self) -> None:\n self.current_token = self.jack_file_tokens[self._token_idx]\n self._token_idx += 1", "def minOperations(n):\n global counter\n counter = 0\n text_len = 1\n copy_len = 0\n if (type(n) != int or n <= 0):\n return (0)\n while (text_len != n):\n if (n % (text_len) == 0):\n copy_len = copy_all(text_len)\n text_len = paste(text_len, copy_len)\n return (counter)", "def _advance(self):\n self._current += self._increment # Accessing the superclass's field", "def _seek_to_n_lines_from_end_ng(f, numlines=10):\n\tline_count = 0;\n\n\tfor line in f:\n\t\tline_count += 1;\n\tpos = line_count - numlines;\n\tif (pos >= 0):\n\t\tf.seek(pos, 0);\n\telse:\n\t\tf.seek(0, 0);", "def parse(self, data):\n self._readahead.write(data)\n buf = self._readahead.getvalue()\n if len(buf) < 4:\n return\n while len(buf) >= 4:\n size = int(buf[:4], 16)\n if size == 0:\n self.handle_pkt(None)\n buf = buf[4:]\n elif size <= len(buf):\n self.handle_pkt(buf[4:size])\n buf = buf[size:]\n else:\n break\n self._readahead = BytesIO()\n self._readahead.write(buf)", "def n_char(self,char,n,w=1,h=1):\n for i in 
range(n):\n self.esprint(char,w,h)", "def skip(self):\r\n length = self.next_byte()\r\n while length != b\"\\x00\" and length:\r\n self.next_bytes(parse_int(length, 'big'))\r\n length = self.next_byte()", "def advance(self):\n if self.current_index < (len(self.decoded_population) - 1):\n self.current_index += 1", "def incr(self, n=1):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator_incr(self, n)", "def input_buffer_peek_n(self, n):\n assert self.curr_input_buff_idx + n - 1 <= len(self.input_buffer)\n return self.input_buffer[self.curr_input_buff_idx:self.curr_input_buff_idx+n]", "def RecCountup(n):\n if n == 0:\n return print('0')\n RecCountup(n - 1)\n print(n)", "def increment(self, n=1):\n with self.current_counter.get_lock():\n self.current_counter.value += n", "def _popN(self, n):\n for _ in range(n):\n self._buffer.popleft()", "def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True", "def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str", "def read(self, buf, n):\n l = min(len(self.prev), n)\n buf[:l] = self.prev[:l]\n self.prev = self.prev[l:] # pitfall self.prev = []\n\n idx = l # the next reading\n while idx < n:\n buf4 = [\"\" for _ in xrange(4)]\n r = read4(buf4)\n if idx+r < n:\n buf[idx:idx+r] = buf4[:r]\n idx += r\n if r < 4: return idx\n else:\n buf[idx:n] = buf4[:n-idx]\n self.prev = buf4[n-idx:r] # pitfall buf4[n-idx:]\n idx = n\n\n return idx", "def extend(self, n_iterations):\n if self._iteration + n_iterations > self.number_of_iterations:\n # This MUST be assigned to a property or the storage won't be updated.\n self.number_of_iterations = self._iteration + n_iterations\n self.run(n_iterations)", "def skip(self, n=None):\n while n > 0:\n try:\n self.next()\n except StopIteration:\n break\n n -= 1", "def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True", "def readline(self, size=-1):\n ...", "def expanding(self,pos_0,pos_1,n):\r\n cnvt_front=self.string(pos_0,pos_1,n)\r\n if int(cnvt_front) in self.expanded:\r\n\r\n a=1\r\n else:\r\n self.expanded.append(int(cnvt_front))", "def process(self):\n\n count = 0\n total = 0\n\n while total < 200 and count < 10:\n digits = self._stream.read(2)\n if len(digits) < 2:\n break\n \n number = int(digits)\n \n total += number\n \n count += 1\n\n return count", "def end_of_input():\n return at_end.bind(lambda end:\n Parser(lambda chunk, last: ParserResult.from_error(\"Not end of input\"))\n if not end else Parser.unit(None))", "async def readexactly(self,\n n: int\n ) -> bytes:\n if n < 1:\n return b''\n\n future = asyncio.Future()\n try:\n self._read_queue.put_nowait((future, True, n))\n return await future\n\n except aio.QueueClosedError:\n raise ConnectionError()", "def read(self, nChar=None):\n raise NotImplementedError()", "def advance(self, i):\n sys.stdout.write('\\r')\n sys.stdout.write(\"[%-30s] %d%%\" % ('=' * int(\n ceil(i / self._n * self._length)),\n (i + 1) / self._n * 100))\n sys.stdout.flush()", "def record_digits_acc(n, pos, acc):\n if pos == len(n):\n return acc\n acc[int(n[pos])] += 1\n return record_digits_acc(n, pos+1, acc)", "def consume(self) -> str:\n c = self.current\n self.pos += 1\n return c", "def indent(self, n):\n self._ind = max(0, self._ind + n)", "def incrementOctave(self, delta):\n\n 
if delta != 0:\n self.nbr = limiter(self.nbr + (delta * 12))", "def make_line(self, char: str, count: int):\r\n return char * count", "def line(n, str):\n\n return_value = ''\n for _ in range(n):\n return_value += str\n return return_value", "def ngrams(n, target):\n chars = collections.deque()\n while True:\n chars.append((yield))\n if len(chars) == n: \n target.send(chars)\n chars.popleft()", "def _buffer_to(self, amount):\n if amount > self.lookahead:\n raise Exception(\n 'Cannot extend buffer to {}: '\n 'beyond buffer lookahead {}'.format(\n amount, self.lookahead\n )\n )\n while len(self.buffer) < amount:\n try:\n self.buffer.appendleft(next(self.stream))\n except StopIteration:\n break", "def _seek_to_n_lines_from_end(f, numlines=10):\n\tbuf = \"\"\n\tbuf_pos = 0\n\tf.seek(0, 2) # seek to the end of the file\n\tline_count = 0\n\n\twhile line_count < numlines:\n\t\tnewline_pos = buf.rfind(\"\\n\", 0, buf_pos)\n\t\tfile_pos = f.tell()\n\n\t\tif newline_pos == -1:\n\t\t\tif file_pos == 0:\n\t\t\t\t# start of file\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttoread = min(1024, file_pos)\n\t\t\t\tf.seek(-toread, 1)\n\t\t\t\tbuf = f.read(toread) + buf[:buf_pos]\n\t\t\t\tf.seek(-toread, 1)\n\t\t\t\tbuf_pos = len(buf) - 1\n\t\telse:\n\t\t\t# found a line\n\t\t\tbuf_pos = newline_pos\n\t\t\tline_count += 1\n\n\tif line_count == numlines:\n\t\tf.seek(buf_pos + 1, 1)", "def num_tokens(self, index):\r\n raise NotImplementedError", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def next(self):\r\n\t\tself.index += 1\r\n\t\treturn not self.eof()", "def n_char_generate(self,char,n):\n return char*n", "def error_till_line_end(self, start, text):\n end = start\n try:\n while text[end] != '\\n': # there's whitespace in rules\n end += 1\n except IndexError:\n end = len(text)\n if end != start:\n self.cur.append((start, Error, text[start:end]))\n end = self.whitespace(end, text)\n return end", "def _advance_line(self):\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n while self.current_line.startswith('#') or self.current_line == '':\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n self._gobble_comments()", "def escAmpp(self) :\n while 1 :\n (value, end) = self.getInteger()\n if value is None :\n return\n if end == 'X' : \n self.pos += value\n #self.logdebug(\"SKIPTO %08x\" % self.pos)", "def advance(self):\n line = self.stream.readline()\n while line is not None:\n # Strip comments or empty spaces\n line = re.sub('//.*', '', line).strip()\n\n # Avoid comments or empty lines\n if line != '':\n break\n\n line = self.stream.readline()\n\n if line is None:\n print \"No more commands.\"\n return\n\n self.current_command = line", "def advance(self):\n self._current_inst += 1\n self._line = self._lines[self._current_inst].strip()", "def nextChar(self):\r\n\t\tself.index += 1\r\n\t\treturn self.currentChar()", "def move_right(self) -> None:\n if not self.buffer:\n return\n\n if self.index == self.buffer.end:\n return\n\n if self.buffer[self.index] != '\\n':\n self.index += 1", "def inc( self ):\n self.count += 1", "def add_peg(self, peg):\n if self.current_index >= 4:\n return\n self.line[self.current_index] = peg\n self.current_index += 1", "def readline(self):\n newstring = ''\n substring = ''\n for x in self.string:\n 
if x!= '\\n':\n substring += x\n else:\n index = len(substring) + 1 ## +1 takes into account newline char\n newstring = self.string[index::]\n break\n self.string = newstring\n self.curr = substring", "def _buffer(self, n=None):\n if self._out_of_scope:\n raise ResultConsumedError(self, _RESULT_OUT_OF_SCOPE_ERROR)\n if self._consumed:\n raise ResultConsumedError(self, _RESULT_CONSUMED_ERROR)\n if n is not None and len(self._record_buffer) >= n:\n return\n record_buffer = deque()\n for record in self:\n record_buffer.append(record)\n if n is not None and len(record_buffer) >= n:\n break\n if n is None:\n self._record_buffer = record_buffer\n else:\n self._record_buffer.extend(record_buffer)\n self._exhausted = not self._record_buffer" ]
[ "0.6357484", "0.5569419", "0.55248344", "0.5505389", "0.5498772", "0.5482731", "0.54729337", "0.54318476", "0.5427386", "0.5418166", "0.5390718", "0.5387553", "0.53819525", "0.5361975", "0.5354554", "0.5300933", "0.52418137", "0.5228473", "0.52153623", "0.5198761", "0.5185116", "0.51682264", "0.51611245", "0.51582396", "0.51546836", "0.5148268", "0.513517", "0.511622", "0.51067626", "0.5103464", "0.5100707", "0.50927925", "0.5092298", "0.50734895", "0.50618154", "0.50483716", "0.5043629", "0.5038855", "0.50141215", "0.5000607", "0.49968445", "0.49947932", "0.49925005", "0.49742478", "0.49635348", "0.49617085", "0.49466425", "0.4943496", "0.49360996", "0.4927862", "0.49128208", "0.49034536", "0.48561758", "0.4842639", "0.48327166", "0.48324108", "0.48317042", "0.48179433", "0.4816161", "0.47947502", "0.47815317", "0.4780822", "0.4779084", "0.47767696", "0.4767544", "0.4764683", "0.47618827", "0.4747405", "0.4739289", "0.47370827", "0.4735888", "0.47343332", "0.47223276", "0.47188738", "0.47159144", "0.4705033", "0.4698777", "0.46885613", "0.46860152", "0.46769178", "0.46766016", "0.46752104", "0.4666983", "0.46408358", "0.46384555", "0.46335328", "0.4633467", "0.46324572", "0.46281448", "0.46246642", "0.46020257", "0.45996854", "0.45951062", "0.45942003", "0.4591802", "0.4586455", "0.45816463", "0.45779136", "0.4577013", "0.4576544", "0.4575129" ]
0.0
-1
Returns True if the parser has reached the end of the input.
def end(self): # type: () -> bool
    return self._idx >= len(self._src) or self._current == "\0"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def is_eof(self) -> bool:\n ...", "def end_of_input():\n return at_end.bind(lambda end:\n Parser(lambda chunk, last: ParserResult.from_error(\"Not end of input\"))\n if not end else Parser.unit(None))", "def eof(self):\r\n\t\treturn self.index == len(self.data)", "def isEOF(self):\n return _libsbml.XMLToken_isEOF(self)", "def done_parsing(self):\n # STUDENT\n return (self.input_buffer_len() == 1 ) and (self.stack_len()==1) \n # END STUDENT", "def _is_eof(self, symbol):\n if symbol.type == self.scanner.EOF:\n return True\n else:\n return False", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def has_end(self):\n return bool(self._end)", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def at_eof(self):\n return self.tell() == len(self)", "def isEOF(self):\n return _libsbml.XMLInputStream_isEOF(self)", "def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)", "def _is_end(self, symbol):\n if symbol.id == self.scanner.END_ID:\n return True\n else:\n return False", "def at_eof(self):\n return self._eof and not self._buffer", "def eos(self):\n return self.pos == len(self.string)", "def has_finished(self) -> bool:\n return self.pos >= len(self.tokens)", "def eof(self):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = pservlet.pipe_eof(self._pipe_desc)\n\t\tif result > 0: return True\n\t\telif result == 0: return False\n\t\traise PlumberExceptions.PlumberNativeException(\"Cannot finish the API call to pipe_eof\")", "def eof(self):\n try:\n next_line = self.read_pkt_line()\n except HangupException:\n return True\n self.unread_pkt_line(next_line)\n return False", "def eof(self):\n\t\treturn not self.is_alive() and self._queue.empty()", "def eof_received(self):\n self.connection_lost('EOF')\n return False", "def eof(self):\n return not self.is_alive() and self._queue.empty()", "def is_eof(line):\n return line == \"\"", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def EndOfPacket(self) -> bool:", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def do_EOF(self, arg):\n return True", "def eol(self):\n return self.pos == len(self.tokens)", "def do_EOF(self, args):\n return True", "def do_EOF(self, args):\n return True", "def do_EOF(self, args):\n return True", "def do_EOF(self, arg):\n \treturn True", "def has_finished(self) -> bool:\n return self.pos >= len(self.s)", "def is_end_node():\n return False", "def _is_at_end(self):\n return self.current >= len(self.source)", "def hasNext(self):\n return bool(self.peek())", "def atEnd(self):\n return (self.ins.tell() == self.size)", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def isEndFor(self, *args):\n return _libsbml.XMLToken_isEndFor(self, *args)", "def _is_end(self, line):\n if re.match(\"\\s+submit|complete|issued|latency\\s+\\:\\s+.*\", line):\n return True", "def hasNext(self) -> bool:\n return self.stack != []", "def __bool__(self):\n return self.end < len(self.data)", "def has_more_lines(self):\n pos = self.stream.tell()\n res = self.stream.readline() != ''\n self.stream.seek(pos)\n return res", 
"def do_EOF(self, argv):\n return True", "def hasNext(self) -> bool:\n return len(self.stack) > 0", "def hasNext(self) -> bool:\n return len(self.stack) > 0", "def hasNext(self) -> bool:\n return len(self.stack) > 0", "def reached_end_of_stream(self):\n pass", "def has_more_commands(self):\n return not self.eof", "def has_next(self):\n return not self.finished_function(self.peek)", "def end_of_epoch(self):\n return not self._cur_epoch_itr.has_next()", "def is_eof(eof):\n return eof == Symbol('#!eof')", "def hasNext(self) -> bool:\n\t\treturn bool(self.stack)", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def has_next(self):\n regf = self.first_hbin().parent()\n if regf.hbins_size() + regf.first_hbin_offset() == self._offset_next_hbin:\n return False\n\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def hasNext(self) -> bool:\n if self.stack: return True\n else: return False", "def use_full_parser(text):\n end_of_header_match = _end_of_simple_header_pattern.search(text)\n return end_of_header_match is not None", "def hasNext(self) -> bool:\n return self.block.hasNext()", "def do_EOF(self):\n return self.do_exit()", "def eof_check(self) -> bool:\n eof = False\n curr_pos = self.fileobject.tell()\n # print(curr_pos, self.st_size)\n chunk = self.fileobject.read(25)\n if chunk == '':\n # Is there something on the back burner??\n if len(self._backburner) > 0:\n self.fileobject = self._backburner.pop()\n # TODO: what if it is the end of the back burner file? Is that handled?\n else:\n eof = True\n else:\n self.fileobject.seek(curr_pos)\n return eof", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def iscomplete(self):\n if self.__start_line < 0:\n return False\n if self.__end_line < self.__start_line:\n return False\n return True", "def hasNext(self):\n return True if self.stack else False", "def true(self):\n val = self.read(4)\n if val != b'true':\n self.on_parser_error(\"true token expected\")\n return True", "def lineTerminatorAhead(self):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n if ahead.channel != Lexer.HIDDEN:\n # We're only interested in tokens on the HIDDEN channel.\n return False\n\n if ahead.type == ECMAScriptParser.LineTerminator:\n # There is definitely a line terminator ahead.\n return True\n\n if ahead.type == ECMAScriptParser.WhiteSpaces:\n # Get the token ahead of the current whitespaces.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 2\n ahead = self._input.get(possibleIndexEosToken)\n\n # Get the token's text and type.\n text = ahead.text\n type = ahead.type\n\n # Check if the token is, or contains a line terminator.\n return (type == ECMAScriptParser.MultiLineComment and \\\n ('\\r' in text or '\\n' in text)) or \\\n (type == ECMAScriptParser.LineTerminator)", "def hasNext(self) -> bool:\n return self.stack or self.node", "def next(self):\r\n\t\tself.index += 1\r\n\t\treturn not self.eof()", "def end_of_line():\n d = get_app().current_buffer.document\n at_end = d.is_cursor_at_the_end_of_line\n last_line = d.is_cursor_at_the_end\n\n return bool(at_end and not last_line)", "def _is_terminated(self) -> bool:\n raise NotImplementedError", "def is_done(self):\n return True if self.t >= self.max_ep_len else False", "def hasNext(self) -> bool:\n return self.pointer 
< len(self.ordered_nodes)", "def do_EOF(self, args):\n print(\"\")\n return True", "def hasNext(self) -> bool:\n return self.idx < len(self.m) - 1", "def hasNext(self) -> bool:\n return self.idx < len(self.m) - 1", "def end(self):\n return next((row for row in self.model if 0 in row), None) is None and self.valid", "def isFull(self):\n return (self.end + 1) % self.max_length == self.start", "def has_next(self) -> bool:\n return self._bin_iter.has_next()", "def hasNext(self) -> bool:\n return len(self.list) != 0", "def handle_close_tag(self, token, lexer):\n prefix, tag=self._split_tagname(token.text[1:])\n if self.tagName==tag and self.prefix==prefix:\n self._token=t=lexer.next()\n if t.tokenType==t_END_TAG:\n return True\n else:\n self.handle_error(\"malformed tag\")\n # if we get here\n self.handle_error('close tag not expected in this context')", "def done(self):\n return self.err is not None or self.offset >= len(self.script)", "def has_ended(self):\r\n if self.end is None:\r\n return False\r\n\r\n return datetime.now(UTC()) > self.end", "def is_done(self):\n return self.is_terminated or self.is_truncated", "def hasNext(self) -> bool:\r\n return len(self.comb) > self.ind", "def isComplete(self):\n return self.bytesToRead == 0", "def _is_close_tag(self, tokenized_line):\n if tokenized_line[0] == '</':\n return True\n else:\n return False", "def if_end(self, **kwargs):\n\n index = self.get('_index')\n\n if index and index >= len(self.steps)-1:\n return True # all steps have been used\n\n return False", "def hasNext(self) -> bool:\n return self.stack", "def is_chunk_end(prev_tag, tag):\n prefix1, chunk_type1 = split_tag(prev_tag)\n prefix2, chunk_type2 = split_tag(tag)\n\n if prefix1 == 'O':\n return False\n if prefix2 == 'O':\n return prefix1 != 'O'\n\n if chunk_type1 != chunk_type2:\n return True\n\n return prefix2 in ['B', 'S'] or prefix1 in ['E', 'S']", "def hasNextLine(self) -> bool:\n raise NotImplementedError", "def hasNext(self) -> bool:\n return self.elements != []", "def _has_end_of_track(self, track):\n last_i = len(track) - 1\n for i, message in enumerate(track):\n if message.type == 'end_of_track':\n if i != last_i:\n raise ValueError('end_of_track not at end of the track')\n return True\n else:\n return False", "def hasNext(self) -> bool:\n raise NotImplementedError", "def end_of_chunk(prev_tag, tag, prev_type, type_):\n chunk_end = False\n\n if prev_tag == 'E': chunk_end = True\n if prev_tag == 'S': chunk_end = True\n\n if prev_tag == 'B' and tag == 'B': chunk_end = True\n if prev_tag == 'B' and tag == 'S': chunk_end = True\n if prev_tag == 'B' and tag == 'O': chunk_end = True\n if prev_tag == 'I' and tag == 'B': chunk_end = True\n if prev_tag == 'I' and tag == 'S': chunk_end = True\n if prev_tag == 'I' and tag == 'O': chunk_end = True\n\n if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:\n chunk_end = True\n\n return chunk_end", "def isend(self, dest=None, tag=None, comm=None):\n return comm.isend(self, dest=dest, tag=tag)" ]
[ "0.8109592", "0.75955963", "0.7418105", "0.7389045", "0.7385828", "0.73675007", "0.733003", "0.7321836", "0.7321836", "0.7321836", "0.7321836", "0.71485347", "0.7051112", "0.7041451", "0.70217884", "0.70157605", "0.69563675", "0.695557", "0.69082236", "0.6880412", "0.6842493", "0.6841862", "0.66864353", "0.662894", "0.65653664", "0.65481657", "0.6498451", "0.6472087", "0.64476913", "0.6442254", "0.64209104", "0.6379928", "0.6379928", "0.6379928", "0.6365595", "0.6325923", "0.63220364", "0.6292008", "0.6278873", "0.6272129", "0.62574565", "0.6245595", "0.62299716", "0.62293047", "0.6225317", "0.6212081", "0.6193818", "0.6192436", "0.61870956", "0.61870956", "0.61870956", "0.6181276", "0.6174954", "0.6171558", "0.61459416", "0.6136224", "0.61344415", "0.60863477", "0.60863477", "0.60863477", "0.60489935", "0.604308", "0.60363007", "0.6035099", "0.60040814", "0.5988585", "0.59830856", "0.5981537", "0.5975153", "0.59594727", "0.59347486", "0.59267265", "0.58883166", "0.5872134", "0.5867655", "0.58455783", "0.5782442", "0.5769441", "0.5757467", "0.5757467", "0.57511663", "0.57482225", "0.5741632", "0.5701145", "0.568696", "0.56802094", "0.5677984", "0.56723404", "0.5670633", "0.56700873", "0.56573975", "0.5646556", "0.564474", "0.56413937", "0.5628296", "0.5626726", "0.5600555", "0.5596265", "0.5581208", "0.55782986" ]
0.70420015
13
Sets the marker to the index's current position
def mark(self): # type: () -> None self._marker = self._idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_pos(self, position, marker):\n i, j = self.board[position]\n self.grid[i][j] = marker", "def __setitem__(self, index, value):\n self.position[index] = value", "def set_mark( self, mark, index ):\n\n try:\n int(self.__grid[index-1])\n\n if mark.lower() == 'x' or mark.lower() == 'o': \n self.__grid[index-1] = mark\n\n return 1\n\n except ValueError:\n return 0", "def move(self, coord, mark):\n self.arr[coord] = mark", "def _set_index(self):\n self.index = 0\n # If offset is negative, target window might start before 0\n self.index = -min(0, self._get_target_index())", "def setPosition(position):", "def place_marker(board, marker, position):\n board[position] = marker", "def __setitem__(self, index, value):\n self.points[index] = value", "def __setitem__(self, index, value):\n self.points[index] = value", "def place_marker(board, marker, position1):\n board[position1] = marker", "def setPositionKey(self, time, index, value, id, view) -> None:\n ...", "def set_current_index(self, index):\r\n self.contents_widget.setCurrentRow(index)", "def __setitem__(self, pos, val):\n self._coords[pos] = val", "def position(self, pos: int):\n self.__pos = pos", "def _new_marker_set(self, markers, key):\n if len(markers.shape) > 2 and markers.shape[2] > 1:\n raise IndexError(\"Markers should be from one frame only\")\n self.markers[key].data = markers\n\n # Remove previous actors from the scene\n for actor in self.markers[key].actors:\n self.parent_window.ren.RemoveActor(actor)\n self.markers[key].actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(markers.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.markers[key].actors.append(vtkActor())\n self.markers[key].actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.markers[key].actors[i])\n\n # Update marker position\n self._update_markers(self.markers[key].data, key)", "def offset_index(self, offset):\n if self.has_index:\n self.index += offset", "def set_position(self, idx, pos):\n if self.EMULATOR_MODE:\n return\n if idx >= self.nleaflets or idx < 0:\n raise IndexError('index specified is out of bounds')\n self._fserial.write(self.MAGIC_BYTES + bytes([idx]) + pos.to_bytes(2, byteorder='big', signed=False) )\n self._fserial.reset_input_buffer()", "def new_marker_set(self, markers):\n if markers.get_num_frames() is not 1:\n raise IndexError(\"Markers should be from one frame only\")\n self.markers = markers\n\n # Remove previous actors from the scene\n for actor in self.markers_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.markers_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(markers.get_num_markers()):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.markers_actors.append(vtkActor())\n self.markers_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.markers_actors[i])\n self.parent_window.ren.ResetCamera()\n\n # Update marker position\n self.update_markers(self.markers)", "def __setitem__(self, j, val):\n\t\tself._coords[j] = val", "def set_position(self, idx, pos):\n if not self.is_valid_idx(idx):\n raise IndexError('index specified is out of bounds')\n self.send_structured_signal(self.PRE_ABSPOS_ONE, bytes([idx]) + pos.to_bytes(2, byteorder='big', signed=True))", "def update_markers(self, selected_index=-1, highlight_color=(1, 0, 0, 1)):\n self.marker_colors.fill(1)\n # default shape 
(non-highlighted)\n shape = \"o\"\n size = 6\n if 0 <= selected_index < len(self.marker_colors):\n self.marker_colors[selected_index] = highlight_color\n # if there is a highlighted marker,\n # change all marker shapes to a square\n shape = \"s\"\n size = 8\n self.markers.set_data(pos=self.pos, symbol=shape, edge_color='red',\n size=size, face_color=self.marker_colors)", "def set_offset(self, index, value):\n if self.is_leaf():\n raise TerminalNodeException\n if value is None:\n return\n else:\n self.offsets[index] = value", "def set_index(self, index):\n self.index = index", "def SetCurrentPosition(self,pos):\n\n if self.Reverse: pos*=-1\n self.Bus.Transaction(chr(self.Address)+chr(0x40)+struct.pack('@l',pos))", "def set_node(self, index, node):\r\n self.loc.coord[index] = node", "def set_new_location(self, xPos, yPos):", "def set_position(self, position):\n self.position = tuple(position)", "def set_pos(self, p: tuple) -> None:\n self.pos = p", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def change_position(self, file_pos, series_index=None):\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)", "def set_position(self, pos):\n self.ref_pos = pos", "def set_position(self, position):\n self.set_current_position(position)", "def seek(self, index: int, /) -> str:\n self.index = index\n return self.current", "def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind", "def __setitem__(self, index, value):\n if self._list_like(index):\n len_var = len(index)\n if len_var==0:\n raise IndexError(\"Received empty index.\")\n elif len_var==1:\n self._points[index[0]] = value\n elif len_var==2:\n # safeguard against empty entries\n if index[0] not in self._points:\n self._points[index[0]] = StatePoint()\n self._points[index[0]][index[1]] = value\n else:\n raise IndexError(\"Received too long index.\")\n else:\n self._points[index] = value", "def set_tile_marker(self, x, y, marker):\n self.__tile_grid[y][x].configure(image=self.__marker_images[marker])", "def set_at_index(self, index: int, value: object) -> None:\n self.data[index] = value", "def set_position(self, new_pos):\n self._position = new_pos", "def __setitem__(self, k, value):\n self._coords[k] = value", "def refresh_mark(self):\n current = self.player.current_position()\n if current != None:\n if self.prev_song != None and self.prev_song < len(self.buf):\n self.buf[self.prev_song] = ' ' + self.buf[self.prev_song][1:]\n self.buf[current] = '-' + self.buf[current][1:]\n self.prev_song = current\n # Move cursor to current position.\n vim.current.window.cursor = (current + 1, 1)", "def SetCurrentPosition(self, point):\n\t\t# So that we can correctly determine the position on a path\n\t\tself.previousPositions.append(self.currentPosition)\n\n\t\tself.currentPosition = point\n\n\t\t# self.Calculate()", "def set_move(self, position: Point, mark: Mark) -> None:\n\t\tif mark == Mark.X:\n\t\t\tself.tiles[position.x][position.y] = 1\n\t\telse:\n\t\t\tself.tiles[position.x][position.y] = -1", "def __setitem__(self, index: int, value: float) -> None:\n self._previous_values[index] = value", "def index(self, 
index):\n\n self._index = index", "def index(self, value, i=0, j=None):\n # YOUR CODE HERE\n raise NotImplementedError()", "def mark(self, mark):\n\n self._mark = mark", "def NewStartingIndex(self) -> int:", "def set_v_item(self, vindex, new_val):\n\n i = [((0, 0),),\n ((1, 1),),\n ((2, 2),),\n ([1, 2], [2, 1]),\n ([2, 0], [0, 2]),\n ([0, 1], [1, 0])]\n\n for j, k in i[vindex]:\n self[j, k] = new_val", "def set_cur_neighbor(self, neighbor):\n self.cur_neighbor = neighbor", "def set_pos(self, x):\n self._pos = x", "def set_item(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'X'", "def set_current(self, cell):\n\n if self.aux is not None:\n self.charMap[self.aux[0]][self.aux[1]].is_current = False\n self.aux = cell\n self.charMap[cell[0]][cell[1]].is_current = True", "def index(self, index):\n \"\"\"\n if index is None:\n raise ValueError(\"Invalid value for `index`, must not be `None`\")\n \"\"\"\n\n self.container['index'] = index", "def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell", "def __setitem__(self, pos, is_on):\n row, column = pos\n self.bits[row][column] = is_on", "def set_editable_point(self, point_index):\r\n self._point_index = point_index\r\n #flags: the stringvar will change because the point is selected\r\n #checked in the callback to only modify the structure if the user edits the fields\r\n self.inhibit_callbacks = True\r\n\r\n self._point_index_var.set(f\"Vertex {self._point_index}\")\r\n self.editable_x.set(round(self._structure.points[point_index][0], 1))\r\n self.editable_y.set(round(self._structure.points[point_index][1], 1))\r\n\r\n self.inhibit_callbacks = False", "def set_index(self, nIndex):\n\t\tcall_sdk_function('PrlVmDev_SetIndex', self.handle, nIndex)", "def index(self, new_index):\n old_index = self._index\n\n L = len(self.results)\n if L == 0:\n new_index = -1\n elif new_index < 0:\n new_index = 0\n elif L - 1 < new_index:\n new_index = L - 1\n\n self._index = new_index\n self._update_preview_content()\n\n # update results formatting\n self._update_selected_result(old_index, new_index)", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def setPosition(self, position, view) -> None:\n ...", "def __setitem__(self, i, value):\n if i < X:\n raise IndexError(\"point3d::__setitem__: negative index {0}\".format(i))\n if i == X:\n self._x = value\n return\n if i == Y:\n self._y = value\n return\n if i == Z:\n self._z = value\n return\n # beyond Z\n raise IndexError(\"point3d::__setitem__: index too large {0}\".format(i))", "def set_offset( self, offset ):\n assert offset in range( len( self.buffer ) )\n self.pos = offset\n self._fill_buffer()", "def set_position(self, position):\n self.position = position", "def set_position(self, position):\n raise NotImplementedError()", "def marker(self):\n self.marker1 = MapMarker(lat=lat1, lon=lon1, source='green_marker.png')\n self.marker2 = MapMarker(lat=lat2, lon=lon2, source='red_marker.png')\n self.ids.mapview.add_marker(self.marker1)\n self.ids.mapview.add_marker(self.marker2)", "def set_current(self, i, ch):\n self.write(\"ISET\" + str(ch) + \":\" + str(i) + \"\\n\")", "def set_offset(self, offset):\r\n for b in self.buf:\r\n b.set_offset(offset)", "def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()", "def setX(self, value):\n self.position[0] = value", "def start(self, marker):\n\t\tself.marker = 
marker\n\t\tself.print_instructions()", "def update_position(position):\n pass", "def __setitem__(self, index: int, value: object) -> None:\n self.set_at_index(index, value)", "def setPosition(self,newPos):\n self._position = newPos", "def _set_mine(self,index):\n game.get_cell(index).set_mine() #set current index as mine\n game.add_mine(index) #add index to mine_index\n\n # add its neighbor's neighbor_num \n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n game.get_cell(s[0]*self._col_num+s[1]).add_neighbor()", "def setPidx(self, pidx):\n self.keeper.setGbl(b\"pidx\", b\"%x\" % pidx)", "def set_loc(self, loc):\n self.loc = loc", "def set(self, index, data):\n self.data[index] = data", "def setItem(self, i, j, val):\n if i < 0:\n raise IndexError('Row index must be nonnegative.')\n if j < 0:\n raise IndexError('Column index must be nonnegative.')\n\n self.__m[i - 1][j - 1] = val", "def set_offset(self, pos):\n self.offset = self.bbox().offset(pos)", "def _move_marker(self, (dx, dy)):\n col = self.selected / self.rows\n totalcols = (len(self.options) + self.rows - 1) / self.rows\n old_selected = self.selected\n\n if dy:\n # move marker up or down\n self.selected = max(0, min(self.selected - dy, len(self.options) - 1))\n elif dx:\n # move marker left or right\n if 0 <= col + dx < totalcols:\n # move up to last item in the column if we're below it\n self.selected = min(self.selected + (self.rows * dx),\n len(self.options) - 1)\n\n if self.selected != old_selected:\n self.redraw = True", "def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val", "def __setitem__(self, index, value):\n self._update_value_at(index, value)", "def insert_point_marker(self, marker):\n self._points.append(marker)", "def set_position(self, posicion):\n\n # FIXME: Actualmente no funciona bien\n posicion = int(posicion)\n if posicion != self.posicion:\n self.posicion = posicion\n self.entrada.write('seek %s %i 0\\n' % (posicion, 1))\n self.entrada.flush()", "def set_offset(self, offset):\n self.offset = offset", "def set_offset(self, offset):\n self.offset = offset", "def __setitem__(self, index, item):\n if item not in self.REPRESENTATION: raise Exception('Grids can only \\'X\\', \\'O\\' and \\'.\\'')\n self.data[index[0] - 1][index[1] - 1] = item", "def set_marker(self, packet):\n if self.file_type == 'csv':\n self.write_data(packet=packet)\n elif self.file_type == 'edf':\n timestamp, code = packet.get_data()\n if self._rectime_offset is None:\n self._rectime_offset = timestamp[0]\n timestamp = timestamp-np.float64(self._rectime_offset)\n self._file_obj.writeAnnotation(timestamp[0], 0.001, str(int(code[0])))", "def modify_pos(self, k, delta):\n self.pos[k] += delta" ]
[ "0.7185202", "0.66934663", "0.6468027", "0.64015055", "0.63248855", "0.6277485", "0.6270743", "0.6233759", "0.6233759", "0.6202351", "0.61644644", "0.6080283", "0.60600364", "0.6030192", "0.60263723", "0.602531", "0.6000895", "0.5999829", "0.59920174", "0.5950956", "0.5903941", "0.5885515", "0.58653927", "0.5816945", "0.5803229", "0.57846415", "0.5765265", "0.57603", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57571805", "0.57243663", "0.5722029", "0.5717793", "0.57104826", "0.5709417", "0.5707871", "0.5697774", "0.5685161", "0.5683035", "0.5641111", "0.56397855", "0.56390846", "0.5637904", "0.562645", "0.5609009", "0.56027645", "0.5580549", "0.55569094", "0.55336744", "0.5526901", "0.5524853", "0.55214024", "0.5513981", "0.5504555", "0.5501949", "0.54993135", "0.5494987", "0.54902434", "0.5490157", "0.548748", "0.5481792", "0.54757965", "0.5463095", "0.54485375", "0.5442323", "0.5441284", "0.5438864", "0.5437649", "0.54375553", "0.5436868", "0.54360867", "0.54270077", "0.54262924", "0.5421869", "0.5411126", "0.5407628", "0.54071504", "0.5384271", "0.5371135", "0.5369632", "0.536337", "0.5354295", "0.5352749", "0.5352668", "0.5349874", "0.5349708", "0.53430504", "0.53430504", "0.5342124", "0.5339855", "0.533879" ]
0.7850461
0
Merges the given Item with the last one currently in the given Container if both are whitespace items. Returns True if the items were merged.
def _merge_ws(self, item, container): # type: (Item, Container) -> bool: last = container.last_item() if not last: return False if not isinstance(item, Whitespace) or not isinstance(last, Whitespace): return False start = self._idx - (len(last.s) + len(item.s)) container.body[-1] = ( container.body[-1][0], Whitespace(self._src[start : self._idx]), ) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_be_merged(prev, cur):\n\n WHITESPACE = (' ', '\\t')\n if not cur.mergeable or not prev.mergeable:\n return False\n # offset is char offset, not byte, so length is the char length!\n elif cur.offset != (prev.offset + prev.length):\n return False\n elif cur.text in WHITESPACE and not prev.text in WHITESPACE:\n return False\n elif prev.text in WHITESPACE and not cur.text in WHITESPACE:\n return False\n return True", "def can_be_merged(prev, cur):\n\n WHITESPACE = (' ', '\\t')\n if not cur.mergeable or not prev.mergeable:\n return False\n elif prev.delete_key_used != cur.delete_key_used:\n return False\n elif prev.start != cur.start and prev.start != cur.end:\n return False\n elif cur.text not in WHITESPACE and \\\n prev.text in WHITESPACE:\n return False\n elif cur.text in WHITESPACE and \\\n prev.text not in WHITESPACE:\n return False\n return True", "def merged(self) -> bool:\n return pulumi.get(self, \"merged\")", "def _append_with_string_merge(seq, new_item):\n if seq and isinstance(new_item, text_type) and isinstance(seq[-1], text_type):\n s = seq.pop()\n seq.append(s+new_item)\n else:\n seq.append(new_item)", "def is_merged(self):\n return self.get_data(\"state\") == self.STATE_MERGED", "def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True", "def MergeAttributeContainers(\n self, callback=None, maximum_number_of_containers=0):\n if not self._container_types:\n self._container_types = self._GetContainerTypes()\n\n number_of_containers = 0\n while (self._active_cursor or self._container_types\n or self._active_extra_containers):\n if not self._active_cursor and not self._active_extra_containers:\n self._PrepareForNextContainerType()\n\n containers = self._GetAttributeContainers(\n self._active_container_type, callback=callback,\n cursor=self._active_cursor,\n maximum_number_of_items=maximum_number_of_containers)\n\n if not containers:\n self._active_cursor = 0\n continue\n\n for container in containers:\n self._add_active_container_method(container)\n number_of_containers += 1\n\n if 0 < maximum_number_of_containers <= number_of_containers:\n logger.debug(\n 'Only merged {0:d} containers'.format(number_of_containers))\n return False\n\n logger.debug('Merged {0:d} containers'.format(number_of_containers))\n # While all the containers have been merged, the 'merging' key is still\n # present, so we still need to remove the store.\n self._store.Remove()\n return True", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def _apply_item(self, item: Item) -> bool:\n return False", "def IsExpanded(self, item):\r\n\r\n return item.IsExpanded()", "def merge_dependency(self, item, resolve_parent, parent):\n return item in self._container_images or super(ContainerImageResolver, self).merge_dependency(item, resolve_parent, parent)", "def ParseItem(\r\n self,\r\n item: \"Statement.ItemType\",\r\n ) -> Optional[bool]:\r\n\r\n # Extract any whitespace prefix (if necessary)\r\n if self._ignore_whitespace_ctr:\r\n while True:\r\n whitespace_results = self._EatWhitespaceToken(self.normalized_iter)\r\n if whitespace_results is None:\r\n break\r\n\r\n self.results += whitespace_results\r\n self.normalized_iter = self.results[-1].IterAfter.Clone()\r\n\r\n # Extract the 
content\r\n if isinstance(item, TokenClass):\r\n result = self._ParseTokenItem(item)\r\n else:\r\n original_item = item\r\n if isinstance(item, Statement.NamedItem):\r\n item = item.Item\r\n\r\n extract_results_from_result_func = lambda result: result.Results\r\n\r\n if isinstance(item, Statement):\r\n result = self._ParseStatementItem(item)\r\n elif isinstance(item, DynamicStatements):\r\n result = self._ParseDynamicStatementItem(item)\r\n elif isinstance(item, tuple):\r\n result = self._ParseRepeatItem(item)\r\n elif isinstance(item, list):\r\n result = self._ParseOrItem(item)\r\n if result is not None:\r\n assert len(result.Results) == 1\r\n assert isinstance(result.Results[0], Statement.StatementParseResultItem)\r\n\r\n extract_results_from_result_func = lambda result: result.Results[0].Results\r\n else:\r\n assert False, item # pragma: no cover\r\n\r\n if result is None:\r\n return None\r\n\r\n statement_parse_result_item = Statement.StatementParseResultItem(\r\n original_item,\r\n extract_results_from_result_func(result),\r\n )\r\n\r\n self.results.append(statement_parse_result_item)\r\n self.normalized_iter = result.Iter.Clone()\r\n\r\n result = result.Success\r\n\r\n # Extract comment tokens (if any)\r\n self._ParsePotentialCommentItem()\r\n\r\n return result", "def merge(self, op):\n highest_mergable = 0\n (head_src, bytestream_src) = self.deconstruct_tail()\n (bytestream_dst, tail_dst) = op.deconstruct_head()\n for ii in range(min(len(bytestream_src), len(bytestream_dst))):\n mergable = True\n for jj in range(ii + 1):\n if not bytestream_src[-ii - 1 + jj].mergable(bytestream_dst[jj]):\n mergable = False\n break\n if mergable:\n highest_mergable = ii + 1\n if 0 >= highest_mergable:\n return False\n if is_verbose():\n print(\"Merging headers %s and %s at %i bytes.\" % (self.__name, op.__name, highest_mergable))\n for ii in range(highest_mergable):\n bytestream_src[-highest_mergable + ii].merge(bytestream_dst[ii])\n bytestream_dst[0:highest_mergable] = []\n self.reconstruct(head_src + bytestream_src)\n op.reconstruct(bytestream_dst + tail_dst)\n return True", "def deleteLast(self) -> bool:\n if not self.isEmpty():\n self.rear = self.move_backward(self.rear)\n return True\n else:\n return False", "def is_container(item):\n if isinstance(item, str):\n return False\n elif hasattr(item, \"__iter__\"):\n return True\n\n return False", "def mergable(self, op):\n if int(self.__size) != int(op.__size):\n return False\n if self.__value != op.__value:\n return False\n return True", "def _content_item_comparison_weak(item_a, item_b):\n if item_a is None or item_b is None:\n log.debug(\"Item is None\")\n return False\n\n return item_a.get_xml() == item_b.get_xml()", "def can_include(self, item: Item) -> bool:\n\n substr_match = find_matching_frame_substrings(self._base, item)\n if not substr_match:\n return False\n\n frame = substr_match.groups[1]\n\n # Dont include if the matched frame of the other item\n # already exists in this sequence frame set.\n if frame in self._frames:\n return False\n\n return True", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def deleteLast(self):\n if not self.isEmpty():\n self._data.pop()\n return True\n else:\n return False", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def add_item(self, item, index):\n if index in self.d_buffer.keys():\n return True\n elif len(self) < 
self._size:\n self.d_buffer.update({index: item})\n return True\n else:\n return False", "def is_monotonic(items: Sequence) -> bool:\n prev_elements = set({items[0]})\n prev_item = items[0]\n\n for item in items:\n if item != prev_item:\n if item in prev_elements:\n return False\n prev_item = item\n prev_elements.add(item)\n\n return True", "def __contains__(self, item: Any) -> bool:\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def __contains__(self, item):\n index = bisect_left(self.sequence, item)\n if (len(self.sequence) != index) and (self.sequence[index] == item):\n return True\n return False", "def __contains__(self, item):\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def equals_items(self, item1, item2):\n if isinstance(item1, (text_type, binary_type)) is True:\n return self.equals_strings(item1, item2)\n\n if type(item1) is float or type(item2) is float:\n if round(item1, 4) != round(item2, 4):\n if self._verbose is True:\n logging.info(\"Float values rounded to \"\n \"4 digits are not equals: \"\n \"{:0.4f} != {:0.4f}\".format(item1, item2))\n return False\n return True\n\n if item1 != item2:\n if self._verbose is True:\n logging.info(\"Not equals: {0} {1}\".format(item1, item2))\n return False\n\n return True", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def drop(self, item: Item) -> bool:\n if item in self.bag:\n self.__bag.remove(item)\n self.room._add_item(item)\n return True\n return False", "def isLast(self):\n index = self.parentNode.idevices.index(self)\n return index == len(self.parentNode.idevices) - 1", "def _process_last(self, first, second):\n if not self.can_combine(first, second):\n # no combining\n self.combined.append(first)\n self.combined.append(second)\n else:\n # combine and terminate\n self.move_cursors_to_end(second)\n self.combine_and_select_block(first)", "def delete_last(self) -> bool:\r\n if self.size == 0:\r\n return False\r\n\r\n if self.lastIndex == 0:\r\n self.lastIndex = self.capacity - 1\r\n else:\r\n self.lastIndex -= 1\r\n self.size -= 1\r\n return True", "def _do_merge(ext, exts_other):\n for ext_other in exts_other:\n if not ext.is_duplicate(ext_other):\n return False\n return True", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def adaptable( item1, item2 ) :\n\n if( item2 is None ) : return( True )\n return re.fullmatch(item2, item1) is not None", "def HasChildren(self, item):\r\n\r\n return len(item.GetChildren()) > 0", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def isRootChildItem(self, source, destination, item) -> bool:\n root = self.invisibleRootItem()\n child = [root.child(n) for n in range(root.childCount())]\n return destination in child", "def add(self, item: Union[Any, Sequence[Any]]) -> None:\n if isinstance(item, Sequence) and not isinstance(item, str):\n self.contents.extend(item)\n else:\n self.contents.append(item)\n return", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)", "def is_merged(self):\r\n url = '{0}/merge'.format(self.get_url())\r\n\r\n return http.Request('GET', url), resource.parse_boolean", "def add(self, item):\n # make 
sure there's enough space to fit all items\n if self.container.capacity(Item.MIN_SIDE_SIZE) < len(self.items) + 1:\n raise LayoutError(\"container too small to fit all items\")\n\n self.items.append(item)\n coords = self.item_coordinates(len(self.items))\n\n self.arrange(coords)\n\n if self.items_intersect():\n raise LayoutError(\"overlapping items\")", "def items_intersect(self):\n for a, b in combinations(self.items, 2):\n if a.intersects_with(b):\n return True\n\n return False", "def keep_item(self, content_item):\n return self._content_item_comparison_weak(\n content_item, self.touch_content_item\n )", "def add(self, item):\n if not self._first:\n self._first = item\n\n if item not in self._items.keys():\n self._items[item] = item, [self._last, None]\n if len(self._items) != 1:\n self._items[self._last][1][1] = item\n self._last = item\n return self.__len__() - 1\n else:\n return self.get_all().index(item)", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return consts.BAG_PUT_FAILED\r\n if res == consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def add(self, element) -> bool:\n if self.data == element.data:\n return False\n\n if self.data > element.data:\n if self.left is None:\n self.left = element\n return True\n else:\n return self.left.add(element)\n else:\n if self.right is None:\n self.right = element\n return True\n else:\n return self.right.add(element)", "def __replace_or_add_item(self, item: ClientWorklistItem):\n # print('__replace_or_add_item: __items=', self.__items)\n for i in range(len(self.__items)):\n val = self.__items[i]\n if item.id == val.id:\n self.__items[i] = item\n return\n # not found above, append it\n self.__items.append(item)", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def determine_inside_container(self):\n tokenum, value = self.current.tokenum, self.current.value\n ending_container = False\n starting_container = False\n\n if tokenum == OP:\n srow = self.current.srow\n scol = self.current.scol\n\n # Record when we're inside a container of some sort (tuple, list, dictionary)\n # So that we can care about that when determining what to do with whitespace\n if value in [\"(\", \"[\", \"{\"]:\n # add to the stack because we started a list\n self.containers.append((value, srow, scol))\n starting_container = True\n\n elif value in [\")\", \"]\", \"}\"]:\n # not necessary to check for correctness\n if not self.containers:\n raise SyntaxError(f\"Found a hanging '{value}' on line {srow}, column {scol}\")\n\n v, sr, sc = self.containers.pop()\n if v != {\")\": \"(\", \"]\": \"[\", \"}\": \"{\"}[value]:\n found_at = f\"line {srow}, column {scol}\"\n found_last = f\"line {sr}, column {sc}\"\n msg = \"Trying to close the wrong type of bracket\"\n msg = f\"{msg}. 
Found '{value}' ({found_at}) instead of closing a '{v}' ({found_last})\"\n raise SyntaxError(msg)\n\n ending_container = True\n\n self.just_ended_container = not len(self.containers) and ending_container\n self.just_started_container = len(self.containers) == 1 and starting_container\n self.in_container = (\n len(self.containers) or self.just_ended_container or self.just_started_container\n )", "def addItemTag(self, item, tag):\r\n if self.inItemTagTransaction:\r\n # XXX: what if item's parent is not a feed?\r\n if not tag in self.addTagBacklog:\r\n self.addTagBacklog[tag] = [] \r\n self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})\r\n return \"OK\"\r\n else:\r\n return self._modifyItemTag(item.id, 'a', tag)", "def insertLast(self, value):\n if not self.isFull():\n self._data.append(value)\n return True\n else:\n return False", "def isItemScrollable(self, itemName, containerObject=None, relatedAreaEnd=None):\r\n # get all nodes matching the item name\r\n nodes = self.getItemNodes(itemName, area=None, relatedItem=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n for node in nodes:\r\n if node.getAttribute('is-scrollable') == \"true\":\r\n return True\r\n\r\n # try if it was wildcard\r\n node = self.wildcardSearch(itemName, area=None, relatedItem=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n if node:\r\n if node.getAttribute('is-scrollable') == \"true\":\r\n return True\r\n\r\n return False", "def OnCompareItems(self, item1, item2):\r\n\r\n # do the comparison here, and not delegate to self._main_win, in order\r\n # to let the user override it\r\n\r\n return self.GetItemText(item1) == self.GetItemText(item2)", "def bubbles_already_last_in_list(bubble_list, bubbles):\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n\n if len(bubble_list) < length:\n return False\n\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n\n start_index = - length\n is_already_in = False\n for bubble in bubbles:\n\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or (text1 == text2)\n start_index += 1\n\n return is_already_in", "def done(self):\n return self.left + 1 == self.right", "def deleteLast(self) -> bool:\n if self.isEmpty():\n return False\n\n self.rear = (self.rear - 1 + self.capacity) % self.capacity\n return True", "def collapseRight(self):\n retval = False\n for rStartInd in [i * self.col for i in range(self.row)]:\n cSlice = self.Range[rStartInd: rStartInd + self.col]\n lst = [self.get_cell(i) for i in cSlice]\n lst.reverse()\n lst, tmp = self.collapseRow(lst)\n lst.reverse()\n for i in range(self.col):\n self.set_cell(cSlice[i], lst[i])\n retval = retval or tmp\n return retval", "def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items", "def check_container_contains_item(context, container, item):\n assert_true(context.uuid[item] in get_container(context, container)[f\"{item}s\"])", "def insert(self, item):\n if self.type == \"lin\":\n index = self.hash(item[0])\n if self.items[index] is None:\n self.items[index] = item\n return True\n else:\n i = (index + 1) % self.n\n while i != index:\n if self.items[i] is None:\n self.items[i] = item\n return True\n i = (i + 1) % self.n\n return False\n\n if self.type == \"quad\":\n index = self.hash(item[0])\n if self.items[index] is 
None:\n self.items[index] = item\n return True\n else:\n j = 1\n i = (index + j**2) % self.n\n while i != index:\n if self.items[i] is None:\n self.items[i] = item\n return True\n j += 1\n i = (i + j**2) % self.n\n return False\n\n if self.type == \"sep\":\n index = self.hash(item[0])\n self.items[index].tableInsert(1, item)\n return True", "def item_diff(self, item, item_name, item_attrs):\n diff = False\n\n log_item_name = self.get_log_item_name(item_name, item_attrs)\n\n log_item = self.db.get_item(\"S3FileLog\", log_item_name, consistent_read=True)\n if log_item is None:\n diff = True\n else:\n # Got a log item, compare attributes to determine whether it is modified\n try:\n if(item['item_name'] == log_item['item_name'] and\n item['last_modified_timestamp'] != item_attrs['last_modified_timestamp']):\n diff = True\n except KeyError:\n # If last_modified does not exist\n diff = False\n\n return diff", "def isEmpty(self):\n return not bool(len(self.ItemList))", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def match(self, item):\n return item == self._expected_item", "def has_duplicates(items):\n items = list()\n items.sort()\n for i in range(len(items) - 1):\n if items[i] == items[i + 1]:\n return True\n return False", "def has_merge(self) -> Optional[str]:\n return None", "def if_Add(self):\n if self.multiList is None:\n return True\n else:\n return False", "def mergeGroup(self):\n if len(self) < 2:\n return\n mainItem = self[0]\n for item in self[1:]:\n mainItem.textLines.extend(item.textLines)\n mainItem.height = reduce(lambda x,y: x+y, [item.height for item in\n self])", "def joinPrefixItems(self):\n newList = []\n mergeList = OutputGroup()\n for item in self:\n if mergeList and (item.level != mergeList[0].level or\n not item.prefix or\n not item.equalPrefix(mergeList[0])):\n mergeList.mergeGroup()\n newList.append(mergeList[0])\n mergeList[:] = []\n mergeList.append(item)\n if mergeList:\n mergeList.mergeGroup()\n newList.append(mergeList[0])\n self[:] = newList", "def GetLastChild(self, item):\r\n\r\n children = item.GetChildren()\r\n return (len(children) == 0 and [None] or [children[-1]])[0]", "def all_same(items):\n \n return all(x == items[0] for x in items)", "def containItem(self, value):\n\t\tif self._linkHead == None:\n\t\t\treturn False\n\n\t\t_nodeCursor = self._linkHead\n\n\t\twhile _nodeCursor != None and _nodeCursor._itemValue != value:\n\t\t\t_nodeCursor = _nodeCursor._itemNext\n\n\t\tif _nodeCursor == None:\n\t\t\treturn False\n\n\t\treturn True", "def __contains__(self, item: Any) -> bool:\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return True\n\n curr = curr.next\n\n return False", "def add(self, item):\n \n with self.lock:\n if isinstance(item, list):\n self.items.join(item)\n else:\n self.items.append(item)", "def insertLast(self, value: int) -> bool:\n if not self.isFull():\n # 后端插入始终是先移动后插入,self.rear始终指向后端最后插入的元素\n self.rear = self.move_forward(self.rear)\n self.q[self.rear] = value\n return True\n else:\n return False", "def its_empty(self) -> bool:\n return self.items == []", "def __process_end_merging(self, finished, manager_data, tracker):\n\n if finished or self.autosave.is_time_exceed():\n while \"PyF\" in \" \".join([x.name for x in reversed(active_children())]):\n continue\n\n self.__merge_processes_data(manager_data, tracker=tracker)\n\n return True\n return 
False", "def mergable(self, frame):\n\t\tfor pos in self.srcList: \n\t\t\tif pos in frame.srcList:\n\t\t\t\treturn True\n\n\t\tfor pos in self.tgtList: \n\t\t\tif pos in frame.tgtList:\n\t\t\t\treturn True\n\n\t\treturn False", "def __contains__(self, item):\n if item in self._parents:\n return True\n else:\n return False", "def search(self, item):\n current = self._head\n # search until we find it or fall off the end\n while current != None:\n if current.getData() == item:\n # item has been found\n return True\n else:\n if current.getData() > item:\n # We’ve passed where the item could be.\n # Only works for ordered lists.\n return False\n else:\n current = current.getNext()\n return False", "def select_stack(self, container, index):\n\n item = container[index]\n if item is None:\n return False\n\n loop_over = enumerate # default enumerator - from start to end\n # same as enumerate() but in reverse order\n reverse_enumerate = lambda l: izip(xrange(len(l)-1, -1, -1), reversed(l))\n\n if container is self.slots.crafting or container is self.slots.fuel:\n targets = (self.inventory.storage, self.inventory.holdables)\n elif container is self.slots.crafted or container is self.slots.storage:\n targets = (self.inventory.holdables, self.inventory.storage)\n # in this case notchian client enumerates from the end. o_O\n loop_over = reverse_enumerate\n elif container is self.inventory.storage:\n if len(self.slots.storage):\n targets = (self.slots.storage,)\n else:\n targets = (self.inventory.holdables,)\n elif container is self.inventory.holdables:\n if len(self.slots.storage):\n targets = (self.slots.storage,)\n else:\n targets = (self.inventory.storage,)\n else:\n return False\n\n # find same item to stack\n initial_quantity = item_quantity = item.quantity\n while item_quantity:\n try:\n qty_before = item_quantity\n for stash in targets:\n for i, slot in loop_over(stash):\n if slot is not None and slot.holds(item) and slot.quantity < 64 \\\n and slot.primary not in blocks.unstackable:\n count = slot.quantity + item_quantity\n if count > 64:\n count, item_quantity = 64, count - 64\n else:\n item_quantity = 0\n stash[i] = slot.replace(quantity=count)\n container[index] = item.replace(quantity=item_quantity)\n self.mark_dirty(stash, i)\n self.mark_dirty(container, index)\n if item_quantity == 0:\n container[index] = None\n return True\n # one more loop for rest of items\n raise NextLoop # break to outer while loop\n # find empty space to move\n for stash in targets:\n for i, slot in loop_over(stash):\n if slot is None:\n stash[i] = item.replace(quantity=item_quantity)\n container[index] = None\n self.mark_dirty(stash, i)\n self.mark_dirty(container, index)\n return True\n if item_quantity == qty_before:\n # did one loop but was not able to put any of the items\n break\n except NextLoop:\n # used to break out of all 'for' loops\n pass\n return initial_quantity != item_quantity", "def __contains__(self, item):\n for _, _, _, cur_item in self.queue:\n if cur_item == item:\n return True\n return False", "def isFull(self):\n if self.type == \"sep\":\n for e in self.items:\n if e.tableIsEmpty():\n return False\n else:\n if None in self.items:\n return False\n\n return True", "def _add(self, item):\n if isinstance(item, Node):\n if item in self:\n return #already added\n elif item.name in self:\n if item.parent:\n #maintain consistency as we're replacing an existing item\n item.parent._remove(item)\n self._children[item.name] = item\n item._parent = self\n else:\n raise ValueError(\"Expected argument to be 
of type Node or one of \"\n \"its descendents\")", "def at_least_one_alive(self, containers):\n for container in self.get_standard_containers(containers):\n # Update container variables so that status is accurate.\n container.container.reload()\n if container.container.status != 'exited':\n return True\n return False", "def is_empty(self) -> bool:\n return self._items == []", "def _insert(self, item):\n if item.room is not None:\n item.room.remove(item)\n\n item.player = self\n self._inventory.append(item)\n\n # if the item is a container, add to inventory its contents\n if item.container:\n for con_item in item.items:\n self._insert(con_item)", "def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False", "def is_consistent(self, item):\n targets = set(ident for ident, node in self._nodes.iteritems() \\\n if node[item] == OCCUPIED)\n return self._check_consistency(item, [self.current], targets)", "def ItemHasChildren(self, item):\r\n\r\n # consider that the item does have children if it has the \"+\" button: it\r\n # might not have them (if it had never been expanded yet) but then it\r\n # could have them as well and it's better to err on this side rather than\r\n # disabling some operations which are restricted to the items with\r\n # children for an item which does have them\r\n return item.HasPlus()", "def _mergeCalibBlocks_isMergeable(object1: Any, object2: Any) -> bool:\n if isinstance(object1, list):\n if not isinstance(object2, list):\n return False\n if len(object1) != len(object2):\n return False\n return all(\n _mergeCalibBlocks_isMergeable(elem1, elem2)\n for elem1, elem2 in zip(object1, object2)\n )\n\n if isinstance(object1, dict):\n if not isinstance(object2, dict):\n return False\n if set(object1.keys()) != set(object2.keys()):\n return False\n return all(\n _mergeCalibBlocks_isMergeable(object1[key], object2[key])\n for key in object1 if key != \"name\"\n )\n\n if isinstance(object1, str):\n if not isinstance(object2, str):\n return False\n if object1.startswith(\"arm=\") and object2.startswith(\"arm=\"):\n return True\n return object1 == object2\n\n return object1 == object2", "def got_both(self):\r\n if Item.A in self.items and Item.B in self.items:\r\n return True", "def deleteLast(self) -> bool:\n if not self.isEmpty():\n self._deque[self._rear] = None\n self._rear = (self._rear + 1)%self._k\n self._elems -= 1\n return True\n \n return False", "def is_append(self):\n return self._tag == 'append'", "def derives_empty(self):\n if self.known_to_derive_empty:\n return True\n for item in self.first():\n if item.is_empty():\n self.known_to_derive_empty = True\n return True\n return False", "def Commit(self):\n try:\n self.commit_changes([])\n return True\n except:\n return False", "def _handle_player_collide_item(self, player: Player, item: DroppedItem, data,\n arbiter: pymunk.Arbiter):\n\n if self._inventory.add_item(item.get_item()):\n print(f\"Picked up a {item!r}\")\n self._world.remove_item(item)\n\n return False", "def is_container(self):\n return (self.__type & NODE_TAG) and self.children" ]
[ "0.5630323", "0.562426", "0.51364464", "0.5081948", "0.5043025", "0.48954043", "0.48792508", "0.4865082", "0.48528847", "0.48474756", "0.48178267", "0.48116726", "0.47968078", "0.4795999", "0.478459", "0.47503677", "0.47446725", "0.47313824", "0.47310415", "0.47016767", "0.46826553", "0.46300948", "0.46248838", "0.45995674", "0.45709726", "0.45621598", "0.4540385", "0.45348996", "0.45143023", "0.4504672", "0.44615632", "0.4457242", "0.44563594", "0.44358963", "0.441614", "0.44112757", "0.4401898", "0.43976316", "0.4389052", "0.43819037", "0.43802103", "0.43680385", "0.43650636", "0.4361874", "0.4360869", "0.43461013", "0.4344704", "0.43443745", "0.4343939", "0.43353227", "0.43286392", "0.43242618", "0.43235585", "0.43191886", "0.4309224", "0.4276418", "0.4272642", "0.42724153", "0.42683843", "0.42628616", "0.42621082", "0.42479566", "0.42434645", "0.4236193", "0.42357534", "0.42344615", "0.4224436", "0.42213708", "0.4212076", "0.42111778", "0.42092508", "0.42037457", "0.4197503", "0.4186759", "0.4179756", "0.41795683", "0.4177293", "0.41735882", "0.41599455", "0.41567108", "0.4148573", "0.41447288", "0.41433117", "0.4139219", "0.4136757", "0.41313544", "0.41260237", "0.41258088", "0.4120747", "0.41104072", "0.4108282", "0.41079095", "0.4105916", "0.41039574", "0.41020972", "0.40936098", "0.4089225", "0.40788698", "0.40689456", "0.40651533" ]
0.75566405
0
Creates a generic "parse error" at the current position.
def parse_error(self, kind=ParseError, args=None): # type: () -> None line, col = self._to_linecol(self._idx) if args: return kind(line, col, *args) else: return kind(line, col)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _error(self, token, msg):\n self._interpreter.parse_error(token, msg)\n return ParseError()", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def __parse_error(self, text):\n m = self.__size_expr.match(text)\n if m is not None:\n self.errcode = b\"\"\n self.errmsg = self.__read_block(int(m.group(1)) + 2)\n return\n\n m = self.__error_expr.match(text)\n if m is None:\n raise Error(\"Bad error message\")\n if m.group(1) is not None:\n self.errcode = m.group(1).strip(b\"()\")\n else:\n self.errcode = b\"\"\n self.errmsg = m.group(2).strip(b'\"')", "def parser_error(msg):\n global MESSAGES\n if CURRENT_ROW != None:\n msg = \"row \"+str(CURRENT_ROW)+\": \"+msg\n msg += \"<br/>\\n&nbsp;&nbsp;&nbsp;starting with: \"\n for col in range(5):\n val = cellval(CURRENT_ROW, col)\n if val == None:\n val = \"\"\n msg += val+\" | \"\n MESSAGES.append(\"ERROR: \"+msg)", "def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)", "def _parse_error(self, err):\r\n self.logger.debug(err)\r\n stack = err.get(\"stack\", [])\r\n if not err[\"message\"].startswith(\"parse error:\"):\r\n err[\"message\"] = \"error: \" + err[\"message\"]\r\n errmsg = \"Octave evaluation error:\\n%s\" % err[\"message\"]\r\n\r\n if not isinstance(stack, StructArray):\r\n return errmsg\r\n\r\n errmsg += \"\\nerror: called from:\"\r\n for item in stack[:-1]:\r\n errmsg += \"\\n %(name)s at line %(line)d\" % item\r\n try: # noqa\r\n errmsg += \", column %(column)d\" % item\r\n except Exception: # noqa\r\n pass\r\n return errmsg", "def _syntax_error(self, msg, thing):\n raise TempliteSyntaxError(f\"{msg}: {thing!r}\")", "def _syntaxError(self, expected=None):\n self._strTree += \"[TOKEN-ERROR]\"\n # Si el error vino desde 'match', podemos saber que token esperariamos encontrar\n if expected is not None:\n raise SynError(SynError.UNEXPECTED_SYM, self._scanner.getPos(),\n \" - Found '\" + self._lookahead.getLexeme() + \"', expected '\" + Token(expected).getLexeme() + \"'\")\n else:\n raise SynError(SynError.UNEXPECTED_SYM, self._scanner.getPos(),\n \" - Found '\" + self._lookahead.getLexeme() + \"'\")", "def parseError( msg ):\n raise Exception( \"Parse error for client object on line {0}: {1}\".format( Campaign.currentLineNumber, msg ) )", "def syntaxError (self, s) :\r\n report = self.generateReport() + s\r\n raise Exception, report", "def _syntax_error(msg):\n try:\n filename = stream.name\n except AttributeError:\n filename = None\n return RFC822SyntaxError(filename, lineno, msg)", "def parse_error(self):\n\n # Check the table_parse_error flag\n return self.__table_parse_error", "def add_parse_error(parse_error):\n assert isinstance(parse_error, ParseError)\n ParseError.parse_errors.append(parse_error)\n message = unicode(parse_error)\n if parse_error.type == \"error\":\n ParseError.logger.error(message)\n import settings\n if not settings.settings[\"quiet\"]:\n print>>sys.stderr, message\n else:\n ParseError.logger.warning(message)", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def add_error(\n self,\n 
message: str,\n position: Optional[Tuple[int, int]] = None,\n headline: Optional[Headline] = None,\n word: Optional[Word] = None,\n ) -> None:\n start: int = 0\n end: int = 0\n\n if position:\n start, end = position\n elif headline:\n start, end = self.report.get_headline_position(headline)\n elif word:\n start, end = self.report.get_word_postion(word)\n\n self.errors.append({\"message\": message, \"start\": start, \"end\": end})", "def error(self, message):\n raise ArgumentParseError(message)", "def t_error(t):\n print(\"Illegal character '%s'\" % repr(t.value[0]))\n t.lexer.skip(1)", "def _raise(self,\n message: str,\n token: Optional[Token] = None,\n verbose: bool = True) -> NoReturn:\n if not verbose:\n raise ParserException(message)\n\n if token is None:\n token = self.tokens.peek()\n\n if isinstance(token, EOF):\n message = f\"{message}, but reached end of file\"\n else:\n message = f\"{message}, but found {repr(str(token))} at {token.position!s}\"\n\n raise ParserException(message)", "def error(self, msg):\n if self.current_line and self.current_file:\n msg = '{}\\nError in {} line {}'.format(\n msg, self.current_file, self.current_line)\n return self.DirectiveError(msg)", "def test():\n\ttry:\n\t\tprint \"Raising ParseErrror.\"\n\t\traise ParseError\n\texcept ParseError:\n\t\tprint \"Caught ParseError.\"", "def _parse_error(self, error):\n error = str(error)\n # Nvidia\n # 0(7): error C1008: undefined variable \"MV\"\n m = re.match(r'(\\d+)\\((\\d+)\\)\\s*:\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # ATI / Intel\n # ERROR: 0:131: '{' : syntax error parse error\n m = re.match(r'ERROR:\\s(\\d+):(\\d+):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # Nouveau\n # 0:28(16): error: syntax error, unexpected ')', expecting '('\n m = re.match(r'(\\d+):(\\d+)\\((\\d+)\\):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(4)\n # Other ...\n return None, error", "def error_at(element: Element) -> Callable[[str], TerminalXMLParseError]:\n\n def error(message: str) -> TerminalXMLParseError:\n return TerminalXMLParseError(element.file, element.opening_line, message)\n\n return error", "def get_parse_error(code):\r\n # note that this uses non-public elements from stdlib's tabnanny, because tabnanny\r\n # is (very frustratingly) written only to be used as a script, but using it that way\r\n # in this context requires writing temporarily files, running subprocesses, blah blah blah\r\n code_buffer = StringIO(code)\r\n try:\r\n tabnanny.process_tokens(tokenize.generate_tokens(code_buffer.readline))\r\n except tokenize.TokenError, err:\r\n return \"Could not parse code: %s\" % err\r\n except IndentationError, err:\r\n return \"Indentation error: %s\" % err\r\n except tabnanny.NannyNag, err:\r\n return \"Ambiguous tab at line %d; line is '%s'.\" % (err.get_lineno(), err.get_line())\r\n return None", "def _ast_node_or_parse_exception(self):\n # This attribute may also be set by __construct_from_annotated_ast(),\n # in which case this code does not run.\n try:\n return _parse_ast_nodes(\n self.text, self._input_flags, self._auto_flags, \"exec\")\n except Exception as e:\n # Add the filename to the exception message to be nicer.\n if self.text.filename:\n try:\n e = type(e)(\"While parsing %s: %s\" % (self.text.filename, e))\n except TypeError:\n # Exception takes more than one argument\n pass\n # Cache the exception to avoid re-attempting while debugging.\n return e", "def __str__(self):\n return \"ParseException: %s\" % self.__msg", "def error(self, message, 
location):\n raise CompilerError(message, loc=location)", "def cursor_error(cls, val):\n return cls('cursor_error', val)", "def cursor_error(cls, val):\n return cls('cursor_error', val)", "def _show_err(self, msg, lineno, lexpos):\n # get the entire string we just tried to parse\n data = self.lexerObj.lexer.lexdata\n s = data.split('\\n')\n\n col = _find_column(data, lexpos)\n line = s[lineno-1]\n\n leader = 3*' '\n print \"-\"*72\n print \"cvx4py error on line %s:\" % lineno\n print leader, \"\"\">> %s \"\"\" % line.strip()\n print leader, \" \" + (\" \"*(col-1)) + \"^\"\n print\n print \"ERROR:\", msg\n print \"-\"*72", "def error(msg):\n return ErrorRule(msg)", "def parse_error(self, error: Union[str, Exception],\n elem: Optional[ElementType] = None,\n validation: Optional[str] = None) -> None:\n if validation is not None:\n check_validation_mode(validation)\n else:\n validation = self.validation\n\n if validation == 'skip':\n return\n elif elem is None:\n elem = self.elem\n elif not is_etree_element(elem):\n msg = \"the argument 'elem' must be an Element instance, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(elem))\n\n if isinstance(error, XMLSchemaParseError):\n error.validator = self\n error.namespaces = getattr(self, 'namespaces', None)\n error.elem = elem\n error.source = getattr(self, 'source', None)\n elif isinstance(error, Exception):\n message = str(error).strip()\n if message[0] in '\\'\"' and message[0] == message[-1]:\n message = message.strip('\\'\"')\n error = XMLSchemaParseError(self, message, elem)\n elif isinstance(error, str):\n error = XMLSchemaParseError(self, error, elem)\n else:\n msg = \"'error' argument must be an exception or a string, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(error))\n\n if validation == 'lax':\n self.errors.append(error)\n else:\n raise error", "def error(self, t):\n print(\"Illegal character '%s' in line %i\" % (t.value[0], self.lineno))\n raise Exception(\"Illegal character '%s' in line %i\" %\n (t.value[0], self.lineno))", "def directive_error(self, level, message):\n return DirectiveError(level, message)", "def test05(self):\n\n s = \"a\"\n with self.assertRaises(ParserException):\n t = parse_newick(s)", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def showsyntaxerror(self, filename=None):\n type, value, sys.last_traceback = sys.exc_info()\n sys.last_type = type\n sys.last_value = value\n if filename and type is SyntaxError:\n # Work hard to stuff the correct filename in the exception\n try:\n msg, (dummy_filename, lineno, offset, line) = value\n except:\n # Not the format we expect; leave it alone\n pass\n else:\n # Stuff in the right filename\n try:\n # Assume SyntaxError is a class exception\n value = SyntaxError(msg, (filename, lineno, offset, line))\n except:\n # If that failed, assume SyntaxError is a string\n value = msg, (filename, lineno, offset, line)\n self.SyntaxTB(type,value,[])", "def unexpected_error(self, exception):", "def error(self, error):\n pass", "def error(self, message=None, show_help=True):", "def parsed_error_msg(self):\r\n return self.error_msg", "def stateError(cls, parserState: ParserState):\r\n\t\traise 
BlockParserException(\"Reached unreachable state!\")", "def __init__(self, *args):\n this = _libsbml.new_XMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def _ps_error(e):\n\n error(None, str(e))", "def parse_line_err(self, match):\n self.line = match.group(2)\n self.filename = match.group(1)\n self.message = match.group(4)\n self.keyword = match.group(3)\n\n self.fix_filename()\n self.fix_namespaces()\n self.fix_nonerrors()\n\n if self.message.strip() == \"ld returned 1 exit status\":\n self.error_type = \"Linking Error\"\n self.signal_eof = True\n return \"\"\n\n return_value = \"\"\n if self.filename is not None:\n return_value += \"{}:\".format(self.filename)\n if self.line is not None:\n return_value += \"{}: \".format(self.line)\n if self.keyword is not None:\n return_value += \"{}: \".format(self.keyword)\n if self.message is not None:\n return_value += self.message\n\n if return_value != \"\":\n return_value += \"\\n\"\n return return_value", "def create_exception(self, msg: str):", "def parseError(message):\n print(\"config error in \" + config_file + \": \" + message, file=sys.stderr)", "def default_error_recovery(self, context):\n return None, context.position + 1 \\\n if context.position < len(context.input_str) else None", "def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)", "def test_lexing_error():\n with pytest.raises(SyntaxError):\n lex._lexer(None, None)._load_text(\"TEST\")._throw_lexing_error()", "def parse_error (self, error_str):\r\n\t\t# Regex out the error and channel indices from the string\r\n\t\tob = re.match(ERROR_FORMAT, error_str)\r\n\t\t\r\n\t\t# If error_str doesn't match an error, return None\r\n\t\tif ob is None:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\r\n\t\terrno,chno = ob.groups()\r\n\t\terrno = int(errno)\r\n\t\tchno = int(chno)\r\n\t\t\r\n\t\t# Get the error description; if none is defined, mark as unrecognised\r\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\r\n\t\t\r\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}", "def make_error(self) -> Optional[str]:\n info = self._info\n if info is None:\n return None\n startl, endl = info.line, info.endline\n\n return \"\\n\".join(((f\"On line {startl + 1}:\"\n if startl == endl else\n f\"On lines {startl + 1} to {endl + 1}:\"),\n self.highlight_lines))", "def __init__(self, *args, **kw):\n try:\n return self._parse_args(*args, **kw)\n except (DateError, TimeError, DateTimeError):\n raise\n except Exception:\n raise SyntaxError('Unable to parse {}, {}'.format(args, kw))", "def error_check(self, message):\n matches = ERROR_SYNTAX.match(message)\n if matches:\n error_code = int(matches.group(1))\n error_message = matches.group(2)\n return error_code, error_message\n return None", "def xerr(self, i):\n return self.errors[0][i]", "def findParsingFailure(self, s):\n\n rest = s\n matches = []\n for i in range(len(self.reParts)):\n thisre = '\\s*' + self.reParts[i] + '(.*)'\n m = re.match(thisre, rest, re.VERBOSE|re.IGNORECASE)\n if not m:\n if i == 0:\n dtype = self.name\n else:\n dtype = self.dtypes[i-1][0]\n raise RuntimeError('Cannot parse field %d (%s) at: %s; previous matches: %s' % (i, dtype, rest, ';'.join(matches)))\n newRest = m.groups()[-1]\n matchedText = rest[:-len(newRest)]\n matches.append(matchedText)\n rest = newRest\n raise RuntimeError('Hunh? 
Failed to find parsing error in %s' % s)", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a cause\n if not line[108] == '(':\n raise ParsingException\n if not line[159:160] == ')':\n raise ParsingException\n\n # Parsing definitions\n self.cause = line[109:159].strip()", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def _errpos(self, fpos):\r\n filename, string = self._includestack[-1]\r\n return filename, srow(string, fpos), scol(string, fpos)", "def error(self, message, new_line=True):\n #\n # Note that while the call to \"get_caller()\" is costly, it only happens\n # when an error occurs, so it shouldn't impact performance\n #\n error_data = (message, self.get_caller())\n self._errors.append(error_data)", "def error(self, *args, **kwargs):", "def __init__(self, msg):\n\n super(DBSyntaxError, self).__init__(msg)\n self.msg = msg", "def error(str):\n\n Utils.send('error', str)", "def _parse(self):\n try:\n # parse token stream into abstract syntax tree (AST)\n self._ast = self._rule_container()\n\n except ParseError:\n raise\n\n except Exception as exc:\n raise ParseError(u'Unexpected error: {0}'.format(unicode(exc)))", "def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))", "def test_lsusb_parse_error_generic(self):\n self.assertRaises(ParseError, jc.parsers.lsusb.parse, self.generic_lsusb_t, quiet=True)", "def error(self, messages=None):\n return StateError(\n created_at=self.created_at,\n started_at=self.started_at,\n messages=messages\n )", "def error(self):\n pass", "def set_error(self, index: int) -> None:\n ...", "def _make_child_error(msg, module, name, traceback, log, log_type, context):\n return ChildError(msg, module, name, traceback, log, log_type, context)", "def __init__(self):\n try:\n # this succeeds with python 2\n import StringIO\n class_StringIO = StringIO.StringIO\n except Exception:\n # this succeeds with python 3\n import io\n class_StringIO = io.StringIO\n\n # create some XML with an error\n sio = class_StringIO( \"<foo> <bar> </foo>\\n\" )\n try:\n ET.parse( sio )\n except Exception:\n self.ET_exc_class = sys.exc_info()[0]\n else:\n # something is wrong; the drawback to this fallback is that you\n # cannot distinguish an XML error from other errors\n self.ET_exc_class = Exception", "def format_exception_only(etype, value):\n # Gracefully handle (the way Python 2.4 and earlier did) the case of\n # being called with (None, None).\n if etype is None:\n return [_format_final_exc_line(etype, value)]\n\n stype = etype.__name__\n smod = etype.__module__\n if smod not in (\"__main__\", \"builtins\", \"exceptions\"):\n stype = smod + '.' 
+ stype\n\n if not issubclass(etype, SyntaxError):\n return [_format_final_exc_line(stype, value)]\n\n # It was a syntax error; show exactly where the problem was found.\n lines = []\n filename = value.filename or \"<string>\"\n lineno = str(value.lineno) or '?'\n lines.append(' File \"%s\", line %s\\n' % (filename, lineno))\n badline = value.text\n offset = value.offset\n if badline is not None:\n lines.append(' %s\\n' % badline.strip())\n if offset is not None:\n caretspace = badline.rstrip('\\n')[:offset].lstrip()\n # non-space whitespace (likes tabs) must be kept for alignment\n caretspace = ((c.isspace() and c or ' ') for c in caretspace)\n # only three spaces to account for offset1 == pos 0\n lines.append(' %s^\\n' % ''.join(caretspace))\n msg = value.msg or \"<no detail available>\"\n lines.append(\"%s: %s\\n\" % (stype, msg))\n return lines", "def error(self, code, message=None):\n return xpath_error(code, message, self, self.error_prefix)", "def getCompilerError():", "def raise_error(field: str, message: str, parent_error: Optional[Exception] = None) -> NoReturn:\n if parent_error is None:\n raise RowGenParseError(json, field, message)\n raise RowGenParseError(json, field, message) from parent_error", "def test_errored_parsing(parser_class, raw_file, exception):\n with open(raw_file, \"rb\") as file_obj:\n parser = parser_class(raw=file_obj.read())\n\n with pytest.raises(exception):\n parser.process()", "def syntaxerr_memoized_parse_block(code):\n to_raise = None\n try:\n return memoized_parse_block(code)\n except CoconutException as err:\n to_raise = err.syntax_err()\n raise to_raise", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def parse(self, input_str, position=0, file_name=None, context=None):\n\n if self.debug:\n a_print(\"*** PARSING STARTED\", new_line=True)\n\n self.errors = []\n\n next_token = self._next_token\n debug = self.debug\n self.file_name = file_name\n self.in_error_recovery = False\n\n context = self._get_init_context(context, input_str, position,\n file_name)\n assert isinstance(context, Context)\n\n self._init_dynamic_disambiguation(context)\n self.state_stack = state_stack = [StackNode(context, None)]\n\n while True:\n cur_state = state_stack[-1].context.state\n if debug:\n a_print(\"Current state:\", str(cur_state.state_id),\n new_line=True)\n\n if context.token_ahead is None:\n if not self.in_layout:\n self._skipws(context)\n if self.debug:\n h_print(\"Layout content:\",\n \"'{}'\".format(context.layout_content),\n level=1)\n\n context.token_ahead = next_token(context)\n\n if debug:\n h_print(\"Context:\", position_context(context), level=1)\n h_print(\"Tokens expected:\",\n expected_symbols_str(cur_state.actions.keys()),\n level=1)\n h_print(\"Token ahead:\", context.token_ahead, level=1)\n\n actions = cur_state.actions.get(context.token_ahead.symbol)\n if not actions and not self.consume_input:\n # If we don't have any action for the current token ahead\n # see if we can finish without consuming the whole input.\n actions = cur_state.actions.get(STOP)\n\n if not actions:\n\n symbols_expected = list(cur_state.actions.keys())\n tokens_ahead = self._get_all_possible_tokens_ahead(context)\n if not self.in_error_recovery:\n error = self._create_error(\n context, symbols_expected,\n tokens_ahead,\n symbols_before=[cur_state.symbol])\n else:\n error = self.errors[-1]\n\n if self.error_recovery:\n if self._do_recovery(context, error):\n self.in_error_recovery = True\n continue\n\n raise error\n\n # Dynamic 
disambiguation\n if self.dynamic_filter:\n actions = self._dynamic_disambiguation(context, actions)\n\n # If after dynamic disambiguation we still have at least one\n # shift and non-empty reduction or multiple non-empty\n # reductions raise exception.\n if len([a for a in actions\n if (a.action is SHIFT)\n or ((a.action is REDUCE) and len(a.prod.rhs))]) > 1:\n raise DynamicDisambiguationConflict(context, actions)\n\n # If dynamic disambiguation is disabled either globaly by not\n # giving disambiguation function or localy by not marking\n # any production dynamic for this state take the first action.\n # First action is either SHIFT while there might be empty\n # reductions, or it is the only reduction.\n # Otherwise, parser construction should raise an error.\n act = actions[0]\n\n if act.action is SHIFT:\n cur_state = act.state\n\n if debug:\n a_print(\"Shift:\",\n \"{} \\\"{}\\\"\"\n .format(cur_state.state_id,\n context.token_ahead.value)\n + \" at position \" +\n str(pos_to_line_col(context.input_str,\n context.position)), level=1)\n\n new_position = context.position + len(context.token_ahead)\n context = Context(\n state=act.state,\n start_position=context.position,\n end_position=new_position,\n token=context.token_ahead,\n layout_content=context.layout_content_ahead,\n position=new_position,\n context=context)\n\n result = self._call_shift_action(context)\n state_stack.append(StackNode(context, result))\n\n self.in_error_recovery = False\n\n elif act.action is REDUCE:\n # if this is EMPTY reduction try to take another if\n # exists.\n if len(act.prod.rhs) == 0:\n if len(actions) > 1:\n act = actions[1]\n context.production = production = act.prod\n\n if debug:\n a_print(\"Reducing\", \"by prod '{}'.\".format(production),\n level=1)\n\n r_length = len(production.rhs)\n top_stack_context = state_stack[-1].context\n if r_length:\n start_reduction_context = state_stack[-r_length].context\n subresults = [x.result for x in state_stack[-r_length:]]\n del state_stack[-r_length:]\n cur_state = state_stack[-1].context.state.gotos[\n production.symbol]\n context = Context(\n state=cur_state,\n start_position=start_reduction_context.start_position,\n end_position=top_stack_context.end_position,\n position=top_stack_context.position,\n production=production,\n token_ahead=top_stack_context.token_ahead,\n layout_content=start_reduction_context.layout_content,\n layout_content_ahead=top_stack_context.layout_content_ahead, # noqa\n context=context)\n else:\n subresults = []\n cur_state = cur_state.gotos[production.symbol]\n context = Context(\n state=cur_state,\n start_position=context.end_position,\n end_position=context.end_position,\n position=context.position,\n production=production,\n token_ahead=top_stack_context.token_ahead,\n layout_content='',\n layout_content_ahead=top_stack_context.layout_content_ahead, # noqa\n context=context)\n\n # Calling reduce action\n result = self._call_reduce_action(context, subresults)\n state_stack.append(StackNode(context, result))\n\n elif act.action is ACCEPT:\n if debug:\n a_print(\"SUCCESS!!!\")\n assert len(state_stack) == 2\n if self.return_position:\n return state_stack[1].result, context.position\n else:\n return state_stack[1].result", "def error(self):\n ...", "def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")", "def error(self, msg, details = \"\" ):\n\n if details is not None:\n msg += \"\\n\\n\" + details\n\n if not self.is_subprocess:\n self.parser.error(msg)\n else:\n raise 
Exception(msg)", "def report_next_token_error(\n self,\n context,\n token,\n extra_error_information=None,\n line_number_delta=0,\n column_number_delta=0,\n use_original_position=False,\n ):\n context.add_triggered_rule(\n context.scan_file,\n (token.original_line_number if use_original_position else token.line_number)\n + line_number_delta,\n (\n token.original_column_number\n if use_original_position\n else token.column_number\n )\n + column_number_delta\n if column_number_delta >= 0\n else -column_number_delta,\n self.get_details().plugin_id,\n self.get_details().plugin_name,\n self.get_details().plugin_description,\n extra_error_information,\n )", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def parse(self, input_str, position=0, file_name=None, extra=None):\n\n if self.debug:\n a_print(\"*** PARSING STARTED\", new_line=True)\n\n self.input_str = input_str\n self.file_name = file_name\n self.extra = {} if extra is None else extra\n\n self.errors = []\n self.in_error_recovery = False\n self.accepted_head = None\n\n next_token = self._next_token\n debug = self.debug\n\n start_head = LRStackNode(self, self.table.states[0], 0, position)\n self._init_dynamic_disambiguation(start_head)\n self.parse_stack = parse_stack = [start_head]\n\n while True:\n head = parse_stack[-1]\n cur_state = head.state\n if debug:\n a_print(\"Current state:\", str(cur_state.state_id),\n new_line=True)\n\n if head.token_ahead is None:\n if not self.in_layout:\n self._skipws(head, input_str)\n if self.debug:\n h_print(\"Layout content:\",\n \"'{}'\".format(head.layout_content),\n level=1)\n\n head.token_ahead = next_token(head)\n\n if debug:\n h_print(\"Context:\",\n position_context(head.input_str,\n head.position), level=1)\n h_print(\"Tokens expected:\",\n expected_symbols_str(cur_state.actions.keys()),\n level=1)\n h_print(\"Token ahead:\", head.token_ahead, level=1)\n\n actions = None\n if head.token_ahead is not None:\n actions = cur_state.actions.get(head.token_ahead.symbol)\n if not actions and not self.consume_input:\n # If we don't have any action for the current token ahead\n # see if we can finish without consuming the whole input.\n actions = cur_state.actions.get(STOP)\n\n if not actions:\n\n symbols_expected = list(cur_state.actions.keys())\n tokens_ahead = self._get_all_possible_tokens_ahead(head)\n self.errors.append(self._create_error(\n head, symbols_expected,\n tokens_ahead,\n symbols_before=[cur_state.symbol]))\n\n if self.error_recovery:\n if self.debug:\n a_print(\"*** STARTING ERROR RECOVERY.\",\n new_line=True)\n if self._do_recovery():\n # Error recovery succeeded\n if self.debug:\n a_print(\n \"*** ERROR RECOVERY SUCCEEDED. 
CONTINUING.\",\n new_line=True)\n continue\n else:\n break\n else:\n break\n\n # Dynamic disambiguation\n if self.dynamic_filter:\n actions = self._dynamic_disambiguation(head, actions)\n\n # If after dynamic disambiguation we still have at least one\n # shift and non-empty reduction or multiple non-empty\n # reductions raise exception.\n if len([a for a in actions\n if (a.action is SHIFT)\n or ((a.action is REDUCE) and len(a.prod.rhs))]) > 1:\n raise DynamicDisambiguationConflict(head, actions)\n\n # If dynamic disambiguation is disabled either globaly by not\n # giving disambiguation function or localy by not marking\n # any production dynamic for this state take the first action.\n # First action is either SHIFT while there might be empty\n # reductions, or it is the only reduction.\n # Otherwise, parser construction should raise an error.\n act = actions[0]\n\n if act.action is SHIFT:\n cur_state = act.state\n\n if debug:\n a_print(\"Shift:\",\n \"{} \\\"{}\\\"\"\n .format(cur_state.state_id,\n head.token_ahead.value)\n + \" at position \" +\n str(pos_to_line_col(self.input_str,\n head.position)), level=1)\n\n new_position = head.position + len(head.token_ahead)\n new_head = LRStackNode(\n self,\n state=act.state,\n frontier=head.frontier + 1,\n token=head.token_ahead,\n layout_content=head.layout_content_ahead,\n position=new_position,\n start_position=head.position,\n end_position=new_position\n )\n new_head.results = self._call_shift_action(new_head)\n parse_stack.append(new_head)\n\n self.in_error_recovery = False\n\n elif act.action is REDUCE:\n # if this is EMPTY reduction try to take another if\n # exists.\n if len(act.prod.rhs) == 0:\n if len(actions) > 1:\n act = actions[1]\n production = act.prod\n\n if debug:\n a_print(\"Reducing\", \"by prod '{}'.\".format(production),\n level=1)\n\n r_length = len(production.rhs)\n if r_length:\n start_reduction_head = parse_stack[-r_length]\n results = [x.results for x in parse_stack[-r_length:]]\n del parse_stack[-r_length:]\n next_state = parse_stack[-1].state.gotos[production.symbol]\n new_head = LRStackNode(\n self,\n state=next_state,\n frontier=head.frontier,\n position=head.position,\n production=production,\n start_position=start_reduction_head.start_position,\n end_position=head.end_position,\n token_ahead=head.token_ahead,\n layout_content=start_reduction_head.layout_content,\n layout_content_ahead=head.layout_content_ahead\n )\n else:\n # Empty reduction\n results = []\n next_state = cur_state.gotos[production.symbol]\n new_head = LRStackNode(\n self,\n state=next_state,\n frontier=head.frontier,\n position=head.position,\n production=production,\n start_position=head.end_position,\n end_position=head.end_position,\n token_ahead=head.token_ahead,\n layout_content='',\n layout_content_ahead=head.layout_content_ahead\n )\n\n # Calling reduce action\n new_head.results = self._call_reduce_action(new_head, results)\n parse_stack.append(new_head)\n\n elif act.action is ACCEPT:\n self.accepted_head = head\n break\n\n if self.accepted_head:\n if debug:\n a_print(\"SUCCESS!!!\")\n if self.return_position:\n return parse_stack[1].results, parse_stack[1].position\n else:\n return parse_stack[1].results\n else:\n raise self.errors[-1]", "def __init__(self, source_text, syntax_error_ctor):\n self.src = source_text\n self.syntax_error_ctor = syntax_error_ctor", "def error(msg):\n\n raise Exception(msg)", "def error(cls, message, *args, **kwargs):\n warnings.warn(\n cls.marker_theme.error() + cls.time() + cls.parse(message), *args, **kwargs\n 
)", "def ErrorString(self): # real signature unknown; restored from __doc__\n pass", "def to_error(self, e):\n # convert from 1-based to 0-based\n line = max(0, int(e.int_line()) - 1)\n if e.error.column != \"None\":\n start = max(0, int(e.error.column) - 1)\n end = start + 1\n if hasattr(e.error, \"end_column\"):\n end = max(0, int(e.error.end_column) - 1)\n else:\n start = 0\n end = len(e.get_line()) - 1\n return {\n # The range at which the message applies.\n \"range\": {\n \"start\": {\"line\": line, \"character\": start},\n \"end\": {\"line\": line, \"character\": end},\n },\n \"message\": e.short_message(),\n \"severity\": DiagnosticSeverity.Error,\n }", "def __gotoSyntaxError(self):\n self.activeWindow().gotoSyntaxError()", "def _parse_store_error(self, response):\n default_msg = \"Failure working with the Store: [{}] {!r}\".format(\n response.status_code, response.content\n )\n try:\n error_data = response.json()\n except ValueError:\n return default_msg\n\n try:\n error_info = [(error[\"message\"], error[\"code\"]) for error in error_data[\"error-list\"]]\n except (KeyError, TypeError):\n return default_msg\n\n if not error_info:\n return default_msg\n\n messages = []\n for msg, code in error_info:\n if code:\n msg += \" [code: {}]\".format(code)\n messages.append(msg)\n return \"Store failure! \" + \"; \".join(messages)", "def error(self, msg, *args, **kwargs):\n pass", "def create_error(test, time, error):\n info = _TestInfo(test, time)\n info._error = error\n return info", "def test_parseMethodExceptionLogged(self):\n\n class UnhandledException(Exception):\n \"\"\"\n An unhandled exception.\n \"\"\"\n\n def raisesValueError(line):\n raise UnhandledException\n\n self.server.parseState = \"command\"\n self.server.parse_command = raisesValueError\n\n self.server.lineReceived(b\"invalid\")\n\n self.assertTrue(self.flushLoggedErrors(UnhandledException))", "def parse_syntax_result(result):\n match_result = re.compile(\"(?P<error>\\w+\\s\\w+) at or near\"\n \" '(?P<near>\\S+)', line (?P<line>\\d+), in (?P<module>\\S+)\")\n used_mod_re = re.compile(\"Module:\\s(\\S+)\\s\\s+Errors:\")\n # noinspection SpellCheckingInspection\n error_re = re.compile(\"Errors:\\s+(.*)\\sat\\sor\\snear \")\n\n if \"No issues found!\" in result:\n sys.stdout.write(\"No issues found!\")\n\n if \"Errors\" in result:\n parsed_output = match_result.findall(result)\n used_mod = used_mod_re.findall(result)\n errors = error_re.findall(result)\n\n if parsed_output and used_mod:\n\n for item in parsed_output:\n\n error = (\"Found errors \\'\" + str(errors[0]) + \"\\' in: \" + str(used_mod[0]) +\n \"\\nModule: \" + str(used_mod[0]) + \", Error: \" + str(item[0]) +\n \", Near: \" + str(item[1]) + \", Line: \" + str(item[2] + \"\\n\"))\n\n sys.stderr.write(error)", "def error(self, messages=None):\n ts = utc_now()\n return StateError(\n created_at=self.created_at,\n started_at=ts,\n stopped_at=ts,\n messages=messages\n )", "def test_parser_exception(self):\n # file contains 1 invalid sample values, 17 PH records total\n self.create_sample_data_set_dir('node59p1_bad.dat', TELEM_DIR, \"node59p1.dat\")\n\n self.assert_initialize()\n\n self.event_subscribers.clear_events()\n result = self.get_samples(DataParticleType.CONTROL, 1)\n result = self.get_samples(DataParticleType.SAMPLE, 16, 30)\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n # Verify an event was raised and we are in our retry state\n self.assert_event_received(ResourceAgentErrorEvent, 
10)\n self.assert_state_change(ResourceAgentState.STREAMING, 10)" ]
[ "0.7336168", "0.7223053", "0.6826519", "0.6767211", "0.65252376", "0.64913183", "0.6489117", "0.64567614", "0.6366437", "0.6286208", "0.6225419", "0.6205159", "0.61615443", "0.60905075", "0.60646534", "0.60110784", "0.6001469", "0.59982574", "0.5976438", "0.59754544", "0.59685206", "0.59616995", "0.59043974", "0.58802694", "0.5878516", "0.5851457", "0.58363944", "0.58024424", "0.5792381", "0.5792381", "0.5745554", "0.5736726", "0.570562", "0.5678451", "0.56494343", "0.56412625", "0.5569945", "0.55474067", "0.55433345", "0.5528777", "0.55166787", "0.5500794", "0.5490681", "0.5449573", "0.5428285", "0.54273164", "0.5422128", "0.5405224", "0.5395305", "0.5391299", "0.5388901", "0.536605", "0.5360731", "0.5341682", "0.5323428", "0.53221434", "0.5321318", "0.53149134", "0.5311577", "0.5295109", "0.52739125", "0.5269004", "0.5266819", "0.5266049", "0.5264886", "0.5264111", "0.52549833", "0.52506226", "0.525004", "0.52272815", "0.52267945", "0.52236515", "0.5222225", "0.5184565", "0.5181221", "0.51800895", "0.51730984", "0.5171176", "0.5159817", "0.51563615", "0.5154059", "0.5154007", "0.5153541", "0.514977", "0.5149473", "0.5146995", "0.5146207", "0.51458097", "0.5140984", "0.51237303", "0.5121795", "0.51211655", "0.51189137", "0.5117472", "0.51147515", "0.5107556", "0.5104902", "0.51033366", "0.5085512", "0.50826454" ]
0.64861685
7
Returns whether a key is strictly a child of another key. AoT siblings are not considered children of one another.
def _is_child(self, parent, child):  # type: (str, str) -> bool
    return child != parent and child.startswith(parent + ".")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_child_exists(self, key):\n return True if _Node.__find_key_in_level(self, key) else False", "def __contains__(self, key):\n\n if type(key) != self.type:\n return False\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n return False\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n return False\n\n return True\n else:\n return others in self.children[first_char]", "def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False", "def is_child(self, kid, mother): \n mom_node = self.names_to_nodes[mother] \n child_node = self.names_to_nodes[kid]\n return mom_node.is_child(child_node)", "def has_parent_key(self):\n if self.is_root():\n return False\n try:\n self.parent_key()\n return True\n except ParseException:\n return False", "def is_subhalo(self, childid, parentid):\n if (childid in self._halos[parentid].properties['children']):\n return True\n else:\n return False", "def is_right_child(self):\n if self.parent == None:\n return False\n\n return self.parent.right == self", "def has_child(self):\n return False", "def is_child_of(self, *args):\n return _ida_hexrays.cexpr_t_is_child_of(self, *args)", "def internal(self):\n if self._leftchild or self._rightchild:\n return True\n return False", "def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False", "def insert(self, key: int) -> bool:\n if self.empty(): # empty tree, so value becomes the root\n self.root = Node(key)\n return True\n\n current = self.root # start at the root\n while current.key != key:\n\n if key < current.key:\n\n if current.left is None: # if no left child exists, insert element as left child\n self.root = current.add_left(key=key)\n return True\n\n else: # if a left child does exist, traverse left\n current = current.left\n\n elif key > current.key:\n\n if current.right is None: # if no right child exists, insert element as right child\n self.root = current.add_right(key=key)\n return True\n\n else: # if a right child does exist, traverse right\n current = current.right\n\n return False # failure to insert", "def is_ancestor(parent_alphabet, child_alphabet):\r\n alphabet = parent_alphabet\r\n while alphabet:\r\n if child_alphabet == alphabet:\r\n return True\r\n alphabet = alphabet.alphabet\r\n return False", "def __contains__(self, other) -> bool:\n\n return other in self.children or any(other in child for child in self.children)", "def isRootChildItem(self, source, destination, item) -> bool:\n root = self.invisibleRootItem()\n child = [root.child(n) for n in range(root.childCount())]\n return destination in child", "def is_right_child(self):\n is_right_child = False\n parent = self.get_parent()\n if parent is not None:\n is_right_child = parent.get_right() == self\n\n return is_right_child", "def is_state_a_child(child: State, parent: State) -> bool:\n if child.x >= parent.x and child.y >= parent.y and child.x <= parent.x + parent.width and child.y<=parent.y+parent.height:\n return True\n return False", "def is_left_child(self):\n if self.parent == None:\n return False\n\n return self.parent.left == self", "def __find_key_in_level(node, key):\n for child in node.children:\n if child.key == key:\n return child\n\n return False", "def is_descendant(self, other):\n return other.is_ancestor(self)", "def 
is_allowed_to_have_child_terms(self):\n return self._is_allowed_to_have_child_terms", "def _is_foreign_key(self, key):\n return self._in_keys(key, self._foreign_keys)", "def has_deep_key(obj, key):\n\tif isinstance(key, str):\n\t\tkey = key.split('.')\n\t\t\n\tlast_obj = obj\n\tfor v in key:\n\t\tif not last_obj.has_key(v):\n\t\t\treturn False\n\t\tlast_obj = last_obj[v]\n\t\n\treturn True", "def is_child_graph(self, child_graph):\n # pylint: disable=protected-access\n if not child_graph or not child_graph._parent_graph:\n return False\n if child_graph._parent_graph == self:\n return True\n return self.is_child_graph(child_graph._parent_graph)\n # pylint: enable=protected-access", "def is_known(self, child):\r\n return child in self._parents", "def _isthisapropertree(self):\n ok = True\n if self._leftchild is not None:\n if self._leftchild._parent != self:\n ok = False\n if self._leftchild._isthisapropertree() is False:\n ok = False\n if self._rightchild is not None:\n if self._rightchild._parent != self:\n ok = False\n if self._rightchild._isthisapropertree() is False:\n ok = False\n if self._parent is not None:\n if self not in (self._parent._leftchild, self._parent._rightchild):\n ok = False\n return ok", "def is_parent(self, mother, kid):\n mom_node = self.names_to_nodes[mother]\n child_node = self.names_to_nodes[kid]\n return child_node.is_parent(mom_node)", "def can_add_child(self, child):\n if not self.is_valid_child(child):\n return False\n if child.isa == u'PBXGroup':\n return len(func.take(\\\n lambda c: c.pbx_name == child.pbx_name and c.realpath() == child.realpath(),\\\n self.pbx_children)) == 0\n else:\n return len(func.take(lambda c:c.realpath() == child.realpath(), self.pbx_children)) == 0", "def is_parent(self):\n return not self.children", "def incorrectly_nested(self):\n return self.parent is not None and self.root < self.parent.root", "def is_left_child(self):\n is_left_child = False\n parent = self.get_parent()\n if parent is not None:\n is_left_child = parent.get_left() == self\n\n return is_left_child", "def has_child(self, value):\n for node in self.children:\n if node.value == value:\n return True\n\n return False", "def __contains__(self,key):\n if self.recursiveLookup(key,self.root):\n return True\n else:\n return False", "def is_descendant_of(self, node):\n return False", "def is_leaf(self):\n if self._leftchild or self._rightchild:\n return False\n return True", "def __contains__(self, key):\n return self._get(key, self.root) is not None", "def leaf(self):\n if not self._leftchild and not self._rightchild:\n return True\n return False", "def __contains__(self, key):\n node, _ = Treap._find_node(key, self.root)\n return node is not None", "def contains_child(self, pid):\n return pid in self._children_ids", "def has_child(self, character):\n # get the position of that character\n if self.num_children() > 0:\n character = character.upper()\n # get the character position the children list\n index = self._get_index(character)\n # if there is a value(not None) in that position then we know it\n # exists\n # print(f'we see child exists => index: {index}, char: {character}')\n return self.children[index] is not None\n return False", "def _isthisapropertree(self):\n ok = True\n if self._leftchild:\n if self._leftchild._parent != self:\n ok = False\n if self._leftchild._isthisapropertree() == False:\n ok = False\n if self._rightchild:\n if self._rightchild._parent != self:\n ok = False\n if self._rightchild._isthisapropertree() == False:\n ok = False\n if self._parent:\n 
if (self._parent._leftchild != self\n and self._parent._rightchild != self):\n ok = False\n return ok", "def IsDescendantOf(self, parent, item):\r\n\r\n while item:\r\n \r\n if item == parent:\r\n \r\n # item is a descendant of parent\r\n return True\r\n \r\n item = item.GetParent()\r\n \r\n return False", "def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...", "def has_children(self):\n return False", "def has_right_child(self, index):\n return self.get_right_child_index(index) < len(self.heap)", "def is_ancestor(self, other):\n\n if other is self:\n return True\n elif hasattr(other, 'base'):\n return self.is_ancestor(other.base)\n else:\n return False", "def _is_incex_key(self, key, value):\n key_out = ((self.included_attributes and\n (key not in self.included_attributes)) or\n (key in self.excluded_attributes))\n value_out = True\n if isinstance(value, dict):\n for change_key in value:\n if isinstance(value[change_key], dict):\n for key in value[change_key]:\n if ((self.included_attributes and\n (key in self.included_attributes)) or\n (key not in self.excluded_attributes)):\n value_out = False\n return key_out and value_out", "def has_child(self, uid: str) -> bool:\n return uid in self._children_uids", "def delete(self, value):\n node = self._root\n parent = None\n while node and node.key != value:\n parent = node;\n if value < node.key:\n node = node._left\n else:\n node = node._right\n\n if not node:\n return False\n\n if not node._left:\n if node == self._root:\n self._root = self._root.right\n else:\n if value <= parent.key:\n parent._left = node._right\n else:\n parent._right = node._right\n elif not node._right:\n if node == self._root:\n self._root = self._root.left\n else:\n if value <= parent.key:\n parent._left = node._left\n else:\n parent._right = node._left\n else:\n node1 = node._left\n parent1 = node\n while node1._right:\n parent1 = node1\n node1 = node1._right\n if node == self._root:\n if parent1 == self._root:\n self._root._key = node1.key\n self._root._left = node1._left\n else:\n parent1._right = node1._left\n self._root._key = node1.key\n else:\n node._key = node1.key\n if parent1 != node:\n parent1._right = node1._left\n return True", "def is_valid_child(self, child):\n return isinstance(child, baseobject.PBXBaseObject) \\\n and child.isa in self.allow_children_types()", "def __contains__(self, pid):\n return self.contains_child(pid) or self.contains_parent(pid)", "def hasChildren():", "def hasChild(self, *args):\n return _libsbml.XMLNode_hasChild(self, *args)", "def _is_hierachy_searchable(child_id: str) -> bool:\n pieces_of_child_id_list = child_id.split('.')\n suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1]\n return suffix.isnumeric()", "def has_children(self) -> bool:\n\n return False", "def isSuperRelation(self, rhs):\n return set(self.iteritems()).issuperset(rhs.iteritems())", "def IsChild(self, *args):\n return _XCAFDoc.XCAFDoc_GraphNode_IsChild(self, *args)", "def is_in(cls, hierarchical_dict: dict, key: str) -> bool:\n return key in cls.get_all_keys(hierarchical_dict)", "def is_same_branch(self, other):\n if self.id == other.id:\n return True\n elif self.is_descendant_of(other) or other.is_descendant_of(self):\n return True\n else:\n return False", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def has_child(self, term):\n for 
parent in self.children:\n if parent.id == term or parent.has_child(term):\n return True\n return False", "def has(self, key):\r\n # handle any special cases\r\n if key.scope == Scope.content:\r\n self._load_definition()\r\n elif key.scope == Scope.parent:\r\n return True\r\n\r\n # it's not clear whether inherited values should return True. Right now they don't\r\n # if someone changes it so that they do, then change any tests of field.name in xx._field_data\r\n return key.field_name in self._fields", "def is_leaf(self):\n return not self.children.exists()", "def __le__(self, other):\n if not isinstance(other, Key):\n return NotImplemented\n return self.__tuple() <= other.__tuple()", "def has_children(self):\n\n pass", "def is_same(self: _R, other: _R) -> bool:\n children = [i.render() for i in self.children]\n other_children = [i.render() for i in other.children]\n return other_children == children", "def is_parent(self) -> bool:\n return AccountEntry.objects.filter(parent=self).exists()", "def is_state_a_child_by_coord(x, y, width, height, parent: State) -> bool:\n if x+1 >= parent.x and y+1 >= parent.y and x + width - 1 <= parent.x + parent.width:\n if y + height - 1 <= parent.y + parent.height:\n return True\n return False", "def get_child_node(self, key):\n return _Node.__find_key_in_level(self, key)", "def is_child_bed(game_object: GameObject) -> bool:\n return CommonObjectTagUtils.has_game_tags(game_object, (\n CommonGameTag.FUNC_BED_KID,\n ))", "def isSubRelation(self, rhs):\n return set(self.iteritems()).issubset(rhs.iteritems())", "def semileaf(self):\n if self._leftchild and not self._rightchild:\n return True\n if self._rightchild and not self._leftchild:\n return True\n return False", "def find(self, key: int) -> bool:\n if self.empty():\n return False\n return self.root.find(key) is not None", "def haschild(self, child):\n return pbxhelper.pbxobj_has_pbxlist_value(self, u'pbx_children', child, \\\n self.is_valid_child)", "def __le__(self, other):\n return self._key <= other._key", "def _unbalanced(self):\n if self.internal():\n if self.full():\n if abs(self._leftchild._height-self._rightchild._height) >= 2:\n return True\n elif self._leftchild and not self._rightchild:\n if self._leftchild._height >= 2:\n return True\n elif self._rightchild._height >= 2:\n return True\n return False", "def has_parent(self):\n return False", "def is_parent(self):\n if self.parent is not None:\n return False\n return True", "def has_parent(self, index):\n return self.get_parent_index(index) < len(self.heap)", "def search(self, key): \n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n if not current_node.children[index]: \n return False\n current_node = current_node.children[index] \n \n return current_node != None and current_node.isEndOfWord", "def verify_child(heights):\n dic = {}\n children = heights.columns[heights.columns.str.contains('^child_')] # Get children columns\n for child in children: # Loop through child_X\n dic.update({child:ks_permutation(heights, child, 'father')})\n return dic", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def has_left_child(self, index):\n return 
self.get_left_child_index(index) < len(self.heap)", "def __eq__(self, other) -> bool:\n # pylint: disable=protected-access\n return isinstance(other, type(self)) and self._key == other._key", "def is_leaf(self) -> bool:\n return not any(self.children)", "def exists(root: Node, key: int):\n if root is None:\n return False\n else:\n if root.key == key:\n return True\n elif key < root.key:\n return exists(root.left, key)\n else:\n return exists(root.right, key)", "def _is_primary_key(self, key):\n return self._in_keys(key, [self._primary_key])", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def is_child_of_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.is_child_of_bin\n return self._get_provider_session('catalog_hierarchy_session').is_child_of_catalog(*args, **kwargs)", "def replace_index(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n if _key in cur_node.keys:\n # replace the internal node\n i = cur_node.keys.index(_key)\n tmp = cur_node.pt[i+1] # right child\n\n while type(tmp) is not leaf:\n tmp = tmp.pt[0]\n \n cur_node.keys[i] = tmp.keys[0]\n return True\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n\n return False", "def is_internal(self):\n # TODO: Check if either left child or right child has a value\n return ... or ...", "def hasSiblings():", "def __eq__(self, other):\n return self.key == other.key \\\n and self.get_inside() == other.get_inside() \\\n and self.get_outside() == other.get_outside()", "def ChildOrMatch(self, other):\n return self._dir == other or other.startswith(self._dir + \"/\")", "def haschildren(self):\n return bool(self.children)", "def base_similar(self, other_root):\n if self.root == other_root:\n return True\n elif self.root.split('/')[:-1] == other_root.split('/')[:-1]:\n return True\n elif other_root in self.root:\n return True\n else:\n return False", "def __eq__(self, other: 'Tree') ->bool:\n return (type(self) is type(other) and\n self.value == other.value and\n self.children == other.children)" ]
[ "0.7154145", "0.6607566", "0.6557412", "0.64785355", "0.642279", "0.62678003", "0.6224301", "0.61702096", "0.61305875", "0.60911614", "0.6060285", "0.6012716", "0.5971186", "0.59556484", "0.59540933", "0.5942657", "0.594154", "0.59342253", "0.59315073", "0.59144604", "0.59130675", "0.5882587", "0.5856666", "0.583073", "0.5822231", "0.58198714", "0.5813549", "0.5787921", "0.57780725", "0.57771116", "0.5775948", "0.5758499", "0.5751501", "0.57442766", "0.57332003", "0.57279783", "0.57126105", "0.5709694", "0.57096726", "0.5709555", "0.5705101", "0.5688603", "0.56863415", "0.5656824", "0.56531054", "0.5647298", "0.56394976", "0.5622928", "0.56109303", "0.56101876", "0.561009", "0.56074804", "0.55995595", "0.5591487", "0.5590052", "0.55778176", "0.55537903", "0.55487674", "0.5541334", "0.5539702", "0.5539702", "0.5539702", "0.5539702", "0.55193925", "0.5515945", "0.5513828", "0.5505203", "0.5492363", "0.54888755", "0.54803634", "0.5475956", "0.5475931", "0.5468339", "0.5464025", "0.5461468", "0.5435606", "0.5418519", "0.5414033", "0.5402221", "0.5395817", "0.53919894", "0.5387028", "0.5383264", "0.5375467", "0.53733146", "0.53715146", "0.5369289", "0.53542817", "0.53406525", "0.5340465", "0.5329481", "0.5327607", "0.5323227", "0.53228116", "0.53218675", "0.53209454", "0.5319904", "0.5307042", "0.53059053", "0.5303897" ]
0.64586204
4
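A minimal usage sketch of the _is_child check shown in the record above, assuming it is lifted out as a plain helper (its logic never touches self); the dotted key names are hypothetical and only illustrate the parent + "." prefix rule:

def _is_child(parent, child):
    # A key is a strict child only when it extends the parent with a "." separator.
    return child != parent and child.startswith(parent + ".")

assert _is_child("tool", "tool.poetry")         # direct child
assert _is_child("tool", "tool.poetry.deps")    # deeper descendants also count
assert not _is_child("tool", "tool")            # a key is never its own child (siblings share the same key)
assert not _is_child("tool", "toolbox.x")       # a shared prefix without the "." separator is not a child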
Attempts to parse the next item and returns it, along with its key if the item is valuelike.
def _parse_item(self):  # type: () -> Optional[Tuple[Optional[Key], Item]]
    self.mark()
    saved_idx = self._save_idx()
    while True:
        c = self._current
        if c == "\n":
            # Found a newline; Return all whitespace found up to this point.
            self.inc()
            return (None, Whitespace(self.extract()))
        elif c in " \t\r":
            # Skip whitespace.
            if not self.inc():
                return (None, Whitespace(self.extract()))
        elif c == "#":
            # Found a comment, parse it
            indent = self.extract()
            cws, comment, trail = self._parse_comment_trail()
            return (None, Comment(Trivia(indent, cws, comment, trail)))
        elif c == "[":
            # Found a table, delegate to the calling function.
            return
        else:
            # Begining of a KV pair.
            # Return to beginning of whitespace so it gets included
            # as indentation for the KV about to be parsed.
            self._restore_idx(*saved_idx)
            key, value = self._parse_key_value(True)
            return key, value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getNextKey(self, item):\n return (2, item)", "def getitem(value, key):\n try:\n return value[key]\n except Exception:\n return \"\"", "def p_value_key(protItem):\n return protItem[-1]", "def p_value_key(protItem):\n return protItem[-1]", "def get_next(item: str, after: dict) -> str:\n item_followers = after.get(item, None)\n if item_followers is None:\n return None\n if len(item_followers) == 1:\n return item_followers[0]\n else:\n for item in item_followers:\n if all(is_predeccessor(after, item, x) for x in item_followers if item is not x):\n return item\n else:\n item_followers.sort()\n to_return = item_followers[0]\n after[to_return].extend(item_followers[1:])\n return to_return", "def get_key(self, item):\r\n return item[0]", "def _parse_key(self): # type: () -> Key\n if self._current in \"\\\"'\":\n return self._parse_quoted_key()\n else:\n return self._parse_bare_key()", "def process_item(item):\n if isinstance(item, ast.Slice):\n name = unwrap(item.lower)\n value = item.upper\n return name, value\n\n # due to ast.Index going away in 3.9, simple indices are just the value\n # themselves.\n if isinstance(item, ast.Name):\n return None, item\n\n if isinstance(item, ast.Index):\n return None, item.value\n\n raise TypeError(f\"{type(item)} type not handled\")", "def __next__(self) -> Tuple[keyType, valueType]:\n key = None\n value = None\n # To determine if it has encountered a situation where a key has multiple values.\n if (len(self.iter_values) != 0) and (self.iter_value_index < len(self.iter_values) - 1):\n self.iter_value_index += 1\n key = self.iter_key\n value = self.iter_values[self.iter_value_index]\n return key, value\n else:\n self.iter_value_index = -1\n self.iter_values = []\n\n def get_new_head_node_index(old_head_node_index: int) -> int:\n \"\"\"\n To find next node if the nodes in this chain are all visited.\n :param old_head_node_index: Subscript of the head node where the last accessed key-value pair is.\n :return: The subscript of the head node where the key-value pair has not been accessed; else return -1, if there's no new pair.\n \"\"\"\n # '-1' means that there is no more new node not visited.\n new_head_index = -1\n if old_head_node_index < self.length - 1:\n for index in range(old_head_node_index + 1, self.length):\n if len(self.hashTable[index].keys) > 0:\n new_head_index = index\n break\n return new_head_index\n\n head_node = self.hashTable[self.iter_head_node_index]\n # head_node.count > 0 means node existing.\n if len(head_node.keys) > 0:\n # There are nodes in the linked list is not accessed\n self.iter_chain_node_index += 1\n if len(head_node.keys) > self.iter_chain_node_index:\n keys_values_list = head_node.singlyLinkedList\n node = keys_values_list[self.iter_chain_node_index]\n key = node.key\n if len(node.values) == 1:\n value = node.values[0]\n else:\n self.iter_values = node.values\n value = node.values[0]\n self.iter_key = node.key\n self.iter_value_index += 1\n\n # All nodes in the linked list have been accessed. 
The new node should be accessed.\n else:\n # Find the hash address of the next node.\n new_hash_address = get_new_head_node_index(self.iter_head_node_index)\n # Find a new node that has not been visited.\n if new_hash_address != -1:\n # update the hash address and the node index.\n self.iter_head_node_index = new_hash_address\n self.iter_chain_node_index = 0\n head_node = self.hashTable[new_hash_address]\n\n keys_values_list = head_node.singlyLinkedList\n node = keys_values_list[self.iter_chain_node_index]\n key = node.key\n if len(node.values) == 1:\n value = node.values[0]\n else:\n self.iter_values = node.values\n value = node.values[0]\n self.iter_key = node.key\n self.iter_value_index = 0\n # There are no new and accessible nodes.\n else:\n raise StopIteration\n else:\n new_hash_address = get_new_head_node_index(self.iter_head_node_index)\n if new_hash_address != -1:\n self.iter_head_node_index = new_hash_address\n self.iter_chain_node_index = 0\n head_node = self.hashTable[new_hash_address]\n\n keys_values_list = head_node.singlyLinkedList\n node = keys_values_list[self.iter_chain_node_index]\n key = node.key\n if len(node.values) == 1:\n value = node.values[0]\n else:\n self.iter_values = node.values\n value = node.values[0]\n self.iter_key = node.key\n self.iter_value_index = 0\n # There is no new and accessible node.\n else:\n raise StopIteration\n return key, value", "def peekitem(self, i):\n key = self._sorted[i]\n return key, self._map[key]", "def get_next_item(todo_list):\n next_item = {}\n\n try:\n if len(todo_list) > 0:\n next_item = todo_list[0]\n except next_item:\n print(\"next_item could not be created\")\n\n return next_item", "def popitem(self):\r\n while 1:\r\n key, value = self.data.popitem()\r\n o = key()\r\n if o is not None:\r\n return o, value", "def popitem(self):\r\n while True:\r\n key, value = self.data.popitem()\r\n o = key()\r\n if o is not None:\r\n return o, value", "def get_next_item(self):\n pass", "def _parse_value(self): # type: () -> Item\n self.mark()\n trivia = Trivia()\n\n c = self._current\n if c == '\"':\n return self._parse_basic_string()\n elif c == \"'\":\n return self._parse_literal_string()\n elif c == \"t\" and self._src[self._idx :].startswith(\"true\"):\n # Boolean: true\n self.inc_n(4)\n\n return Bool(True, trivia)\n elif c == \"f\" and self._src[self._idx :].startswith(\"false\"):\n # Boolean: true\n self.inc_n(5)\n\n return Bool(False, trivia)\n elif c == \"[\":\n # Array\n elems = [] # type: List[Item]\n self.inc()\n\n while self._current != \"]\":\n self.mark()\n while self._current.is_ws() or self._current == \",\":\n self.inc()\n\n if self._idx != self._marker:\n elems.append(Whitespace(self.extract()))\n\n if self._current == \"]\":\n break\n\n if self._current == \"#\":\n cws, comment, trail = self._parse_comment_trail()\n\n next_ = Comment(Trivia(\"\", cws, comment, trail))\n else:\n next_ = self._parse_value()\n\n elems.append(next_)\n\n self.inc()\n\n res = Array(elems, trivia)\n\n if res.is_homogeneous():\n return res\n\n raise self.parse_error(MixedArrayTypesError)\n elif c == \"{\":\n # Inline table\n elems = Container()\n self.inc()\n\n while self._current != \"}\":\n if self._current.is_ws() or self._current == \",\":\n self.inc()\n continue\n\n key, val = self._parse_key_value(False)\n elems.append(key, val)\n\n self.inc()\n\n return InlineTable(elems, trivia)\n elif c in string.digits + \"+\" + \"-\":\n # Integer, Float, Date, Time or DateTime\n while self._current not in \" \\t\\n\\r#,]}\" and self.inc():\n pass\n\n 
raw = self.extract()\n\n item = self._parse_number(raw, trivia)\n if item:\n return item\n\n try:\n res = parse_rfc3339(raw)\n except ValueError:\n res = None\n\n if res is None:\n raise self.parse_error(InvalidNumberOrDateError)\n\n if isinstance(res, datetime.datetime):\n return DateTime(res, trivia, raw)\n elif isinstance(res, datetime.time):\n return Time(res, trivia, raw)\n elif isinstance(res, datetime.date):\n return Date(res, trivia, raw)\n else:\n raise self.parse_error(InvalidNumberOrDateError)\n else:\n raise self.parse_error(UnexpectedCharError, (c))", "def next(self):\n if LongObjectHashMap.self.modCount != self.expectedModCount:\n raise ConcurrentModificationException()\n if not self.hasNext():\n raise NoSuchElementException()\n keys = LongObjectHashMap.self.keys\n self.count += 1\n if self.prevKey != self.EMPTY_KEY:\n self.innerIndex += 1\n while len(keys):\n if keys[self.index] != None:\n while len(length):\n key = keys[self.index][self.innerIndex]\n value = self.values[self.index][self.innerIndex]\n if key == self.EMPTY_KEY:\n break\n self.lastReturned = key\n self.prevKey = key\n self.prevValue = value\n return self.prevValue\n self.innerIndex += 1\n self.innerIndex = 0\n self.index += 1\n raise NoSuchElementException()", "def __getitem__(self, item):\n result = self._get_raw_input()[item]\n return result[0] if isinstance(result, list) else result", "def popitem(self):\n key = next(iter(self))\n return key, self.pop(key)", "def peek(self):\n if self.count() <= 0:\n raise ValueError('Cannot peek at value that does not exist')\n return self.items[1]", "def _parse_table(self): # type: (Optional[str]) -> Tuple[Key, Item]\n indent = self.extract()\n self.inc() # Skip opening bracket\n\n is_aot = False\n if self._current == \"[\":\n if not self.inc():\n raise self.parse_error(UnexpectedEofError)\n\n is_aot = True\n\n # Key\n self.mark()\n while self._current != \"]\" and self.inc():\n pass\n\n name = self.extract()\n key = Key(name, sep=\"\")\n\n self.inc() # Skip closing bracket\n if is_aot:\n # TODO: Verify close bracket\n self.inc()\n\n cws, comment, trail = self._parse_comment_trail()\n\n result = Null()\n values = Container()\n\n while not self.end():\n item = self._parse_item()\n if item:\n _key, item = item\n if not self._merge_ws(item, values):\n values.append(_key, item)\n else:\n if self._current == \"[\":\n _, name_next = self._peek_table()\n\n if self._is_child(name, name_next):\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n\n # Picking up any sibling\n while not self.end():\n _, name_next = self._peek_table()\n\n if not self._is_child(name, name_next):\n break\n\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n else:\n table = Table(\n values, Trivia(indent, cws, comment, trail), is_aot\n )\n\n result = table\n if is_aot and (\n not self._aot_stack or name != self._aot_stack[-1]\n ):\n result = self._parse_aot(table, name)\n\n break\n else:\n raise self.parse_error(\n InternalParserError,\n (\"_parse_item() returned None on a non-bracket character.\"),\n )\n\n if isinstance(result, Null):\n result = Table(values, Trivia(indent, cws, comment, trail), is_aot)\n\n return key, result", "def _parse_item(item: str) -> dict:\n delimiter = _get_delimiter(item)\n key, value = item.split(delimiter)\n if delimiter == '=':\n return {key: value}\n else:\n try:\n return {key: json.loads(value)}\n 
except json.JSONDecodeError:\n raise click.UsageError(JSON_ERROR_MESSAGE.format(item))", "def parse(self):\r\n for key, value in KLVParser(self.value, self.key_length):\r\n try:\r\n self.items[key] = self.parsers[key](value)\r\n except Exception:\r\n None", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def _getPrevKey(self, item):\n return (1, item)", "def next(self):\n self.iterator.next()\n return self.iterator.prevKey", "def _parse_next_start(self, item):\n return parse(\" \".join(item.split(\"–\")[:-1]))", "def do_dict_entry_for_item(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly five arguments\" % bits[0])\r\n if bits[2] != 'from':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'from'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return DictEntryForItemNode(bits[1], bits[3], bits[5])", "def GetNextExpanded(self, item): \r\n\r\n return self.GetNext(item, False)", "def _resolver_first(self, item: Any, *_: Any) -> Any:\n try:\n return next(iter(item))\n except StopIteration:\n assert False # not supposed to happen in current tests", "def _get_item(self, item_name, item_type):\n\t\t# create local cache for performance optimizations. TODO: Rewrite functions that call this function\n\t\tif not self.item_list:\n\t\t\tself.item_list = self.pre_object_list\n\t\t\tself.item_cache = {}\n\t\t\tfor item in self.item_list:\n\t\t\t\tif not item.has_key('name'):\n\t\t\t\t\tcontinue\n\t\t\t\tname = item['name']\n\t\t\t\ttmp_item_type = (item['meta']['object_type'])\n\t\t\t\tif not self.item_cache.has_key( tmp_item_type ):\n\t\t\t\t\tself.item_cache[tmp_item_type] = {}\n\t\t\t\tself.item_cache[tmp_item_type][name] = item\n\t\ttry:\n\t\t\treturn self.item_cache[item_type][item_name]\n\t\texcept:\n\t\t\treturn None\n\t\tif self.item_cache[item_type].has_key(item_name):\n\t\t\treturn self.item_cache[item_type][item_name]\n\t\treturn None\n\t\tfor test_item in self.item_list: \n\t\t\t## Skip items without a name\n\t\t\tif not test_item.has_key('name'):\n\t\t\t\tcontinue\n\n\t\t\t## Make sure there isn't an infinite loop going on\n\t\t\ttry:\n\t\t\t\tif (test_item['name'] == item_name) and (test_item['meta']['object_type'] == item_type):\n\t\t\t\t\treturn test_item\n\t\t\texcept:\n\t\t\t\traise ParserError(\"Loop detected, exiting\", item=test_item)\n\t\t\t\n\t\t## If we make it this far, it means there is no matching item\n\t\treturn None", "def parse_items(self):", "def popitem(self):\r\n while True:\r\n key, sleek_ref = self.data.popitem()\r\n try:\r\n return key, sleek_ref()\r\n except SleekRefDied:\r\n pass", "def __getitem__(self, item):\n result = self.get(item)\n if not result:\n raise KeyError(item)\n else:\n return result", "def _NextItem(self):\n if self._injected:\n self._injected = False\n return self._injected_value\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._tap.Done()\n raise\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except 
(AttributeError, KeyError, TypeError):\n pass\n except IndexError:\n self._tap.Done()\n raise StopIteration\n # Object is not iterable -- treat it as the only item.\n if self._iterable is None or self._stop:\n self._tap.Done()\n raise StopIteration\n self._stop = True\n return self._iterable", "def next(self):\n nxt = self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt", "def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)", "def key_stream(src, tokenizer=tokenize_mapper_json):\n this_streams_key = None\n while src.has_next():\n next_val = src.peek()\n key, value = tokenizer(next_val)\n if this_streams_key is None:\n this_streams_key = key\n if this_streams_key == key:\n yield tokenizer(src.next())[1]\n else:\n raise StopIteration()\n raise StopIteration()", "def __getitem__(self, key):\n\n if type(key) != self.type:\n raise TypeError\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n print(\"FIRST_CHAR\", first_char)\n print(\"self.children\", self.children)\n raise KeyError\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n raise KeyError\n\n return node.value\n else:\n return self.children[first_char][others]", "def __next__(self):\n try:\n t = self.items[self.pos]\n except IndexError:\n raise EOF()\n self.pos += 1\n return t", "def __getitem__(self, item):\n return self.fields[item]", "def _map_popitem(self):\n if len(self) == 0:\n raise KeyError('key not found')\n key = self.keys()[0]\n return (key, self.pop(key))", "def __getitem__(self, item):\n return self.get(sighash=item)", "def get_next_item(self) -> Generator[LibraryItem, None, None]:\n pass", "def ParseItem(\r\n self,\r\n item: \"Statement.ItemType\",\r\n ) -> Optional[bool]:\r\n\r\n # Extract any whitespace prefix (if necessary)\r\n if self._ignore_whitespace_ctr:\r\n while True:\r\n whitespace_results = self._EatWhitespaceToken(self.normalized_iter)\r\n if whitespace_results is None:\r\n break\r\n\r\n self.results += whitespace_results\r\n self.normalized_iter = self.results[-1].IterAfter.Clone()\r\n\r\n # Extract the content\r\n if isinstance(item, TokenClass):\r\n result = self._ParseTokenItem(item)\r\n else:\r\n original_item = item\r\n if isinstance(item, Statement.NamedItem):\r\n item = item.Item\r\n\r\n extract_results_from_result_func = lambda result: result.Results\r\n\r\n if isinstance(item, Statement):\r\n result = self._ParseStatementItem(item)\r\n elif isinstance(item, DynamicStatements):\r\n result = self._ParseDynamicStatementItem(item)\r\n elif isinstance(item, tuple):\r\n result = self._ParseRepeatItem(item)\r\n elif isinstance(item, list):\r\n result = self._ParseOrItem(item)\r\n if result is not None:\r\n assert len(result.Results) == 1\r\n assert isinstance(result.Results[0], Statement.StatementParseResultItem)\r\n\r\n extract_results_from_result_func = lambda result: result.Results[0].Results\r\n else:\r\n assert False, item # pragma: no cover\r\n\r\n if result is None:\r\n return None\r\n\r\n statement_parse_result_item = Statement.StatementParseResultItem(\r\n original_item,\r\n extract_results_from_result_func(result),\r\n )\r\n\r\n self.results.append(statement_parse_result_item)\r\n self.normalized_iter = result.Iter.Clone()\r\n\r\n result = result.Success\r\n\r\n # Extract comment tokens (if any)\r\n self._ParsePotentialCommentItem()\r\n\r\n return result", "def _extract_item(self, item):\n ver = 0\n if isinstance(item, 
tuple):\n ver_sent = True\n nitem = len(item)\n if nitem == 1:\n ext = item[0]\n elif nitem == 2:\n ext, ver = item\n else:\n ver_sent = False\n ext = item\n return ext, ver, ver_sent", "def _parse_quoted_key(self): # type: () -> Key\n quote_style = self._current\n key_type = None\n for t in KeyType:\n if t.value == quote_style:\n key_type = t\n break\n\n if key_type is None:\n raise RuntimeError(\"Should not have entered _parse_quoted_key()\")\n\n self.inc()\n self.mark()\n\n while self._current != quote_style and self.inc():\n pass\n\n key = self.extract()\n self.inc()\n\n return Key(key, key_type, \"\")", "def __getitem__(self, item):\n try:\n if \".\" in item:\n keys = item.split(\".\")\n else:\n return self.data[item]\n element = self.data[keys[0]]\n for key in keys[1:]:\n element = element[key]\n except KeyError:\n raise KeyError(f\"The key '{item}' could not be found in the yaml file '{self.filename}'\")\n except Exception as e:\n print(e)\n raise ValueError(\"unkown error\")\n return element", "def get_next(self):\n\n # pop the next item off the front of the list\n item = self.r.lpop(self.joblist)\n\n # gotta decode the bytes\n ritem = item.decode('utf-8')\n\n # if nothing comes out of the list, then it's empty and return 0\n # otherwise return whatever is next\n if not item:\n return 0\n else:\n return ritem", "def _kv_helper(cache, value):\n vals = [v.replace('\"','') for v in value.split(cache[\"delimiter\"])]\n if \"filtering\" not in cache or _filtering_passed_helper(cache[\"filtering\"], vals): #yield if filtering criteria met or no filtering criteria \n k = \"+\".join(vals) if cache[\"key_columns\"] == \"*\" else \"+\".join(vals[l] for l in cache[\"key_columns\"]) \n v = \",\".join(vals) if cache[\"target_columns\"] == \"*\" else \",\".join([vals[l] for l in cache[\"target_columns\"]])\n return k, v\n return None, None", "def __getitem__(self, key):\r\n r = self._get_raw_input()[key]\r\n if isinstance(r, list):\r\n return r[0]\r\n return r", "def __getitem__(self, key):\n for entry_key, value in self.read(key):\n if entry_key != key:\n raise KeyError(key)\n return value\n raise KeyError(key)", "def getitem(s, i):\n while i > 0:\n s, i = rest(s), i - 1\n return first(s)", "def __next__(self):\n\n nxt = next(self.tree)\n if nxt is not None:\n return nxt.key", "def get_item(self, key):\n search_slot = self.count_hash(key, len(self.slots))\n\n if self.slots[search_slot] == key:\n data = self.data[search_slot]\n elif isinstance(self.slots[search_slot], tuple):\n index_tuple = (self.slots[search_slot].index(key))\n data = (self.data[search_slot][index_tuple])\n else:\n data = None\n\n return data", "def __getitem__(self, item):\n return self._metadata[item]", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def parse_data_value(self, value):\n #print('parsing: {}'.format(value))\n if len(value) == 0:\n return value\n elif value[0] == '(' and value[-1] == ')':\n newdict = {}\n cur_level = 0\n cur_key = []\n cur_value = []\n cur_inner = []\n state = 0\n first_key_pass = False\n for char in value[1:-1]:\n\n # State 0 - reading key\n if state == 0:\n if char == '=':\n state = 1\n elif first_key_pass and char == ',':\n pass\n else:\n cur_key.append(char)\n first_key_pass = False\n\n # State 1 - reading value\n elif state == 1:\n if char == ',':\n newdict[''.join(cur_key)] = 
self.parse_data_value(''.join(cur_value))\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n elif char == '(':\n cur_level += 1\n cur_inner.append(char)\n state = 2\n else:\n cur_value.append(char)\n\n # State 2 - Reading first char of an inner paren stanza\n elif state == 2:\n if char == '(':\n newdict[''.join(cur_key)] = []\n state = 4\n at_first = True\n else:\n state = 3\n\n # State 3 - reading a regular inner dict\n if state == 3:\n if char == '(':\n cur_level += 1\n elif char == ')':\n cur_level -= 1\n cur_inner.append(char)\n if cur_level == 0:\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_inner[1:-1]))\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n\n # State 4 - Reading a list\n elif state == 4:\n if char == '(':\n cur_level += 1\n if not at_first:\n cur_inner.append(char)\n elif char == ')':\n cur_level -= 1\n cur_inner.append(char)\n\n if cur_level == 1:\n newdict[''.join(cur_key)].append(self.parse_data_value(''.join(cur_inner)))\n cur_inner = []\n\n elif cur_level == 0:\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n\n elif cur_level == 1 and char == ',':\n pass\n\n else:\n cur_inner.append(char)\n\n at_first = False\n\n # Clean up, depending on our state\n if state == 0:\n pass\n elif state == 1:\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_value))\n else:\n raise Exception(\"shouldn't be able to get here\")\n\n return newdict\n else:\n\n # Check for quoted values, and don't split commas inside them.\n # Also don't try to parse mismatched quotes. We're just being\n # even more stupid about it and converting commas in quotes to\n # unicode snowmen, temporarily\n new_value = value\n replace_comma = u\"\\u2603\"\n quote_parts = value.split('\"')\n if len(quote_parts) > 1 and len(quote_parts) % 2 == 1:\n new_val_list = []\n for (idx, part) in enumerate(quote_parts):\n if idx % 2 == 1:\n new_val_list.append(part.replace(',', replace_comma))\n else:\n new_val_list.append(part)\n new_value = '\"'.join(new_val_list)\n\n parts = [p.replace(replace_comma, ',') for p in new_value.split(',')]\n if len(parts) == 1:\n # See the comment on the other side of the `if` here. We may have\n # a single-element dict.\n if '=' in value:\n newdict = {}\n (key, val) = value.split('=', 1)\n newdict[key] = val\n return newdict\n else:\n return value\n else:\n # This is hokey, and a byproduct of the stupid way we're parsing\n # this stuff (and is susceptible to corner cases) - anyway, at\n # this point we MAY have a dict, or we may just have a string\n # which happens to have a comma in it. We'll just test the first\n # element and see if there's an equals sign in it. If it does,\n # then we'll parse it as a dict. 
If not, just return as a string.\n if '=' in parts[0]:\n newdict = {}\n for part in parts:\n (key, val) = part.split('=', 1)\n newdict[key] = val\n return newdict\n else:\n return value", "def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value", "def first(data, key):\n for i in data:\n if key(i):\n return i\n return None", "def __getitem__(self, item):\n if isinstance(item, str):\n item = [i for i, v in enumerate(self.list) if item == v.name]\n if len(item) > 0:\n item = item[0]\n return self.list[item]", "def decode_map_element(self, item_type, value):\r\n import urllib\r\n key = value\r\n if \":\" in value:\r\n key, value = value.split(':',1)\r\n key = urllib.unquote(key)\r\n if Model in item_type.mro():\r\n value = item_type(id=value)\r\n else:\r\n value = self.decode(item_type, value)\r\n return (key, value)", "def __getitem__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def _extract_by_key(self, line, key):\n search = r'{0}=.+?,'.format(key) # lazy match to first ,\n attr_match = re.search(search, line)\n if attr_match:\n # grab just the value of the attribute from attr_key=value,\n value = attr_match.group()[len(key) + 1 : len(attr_match.group()) - 1]\n return value\n else:\n return \"notfound\"", "def iteritems(self):\n\t\tself.filep.seek(self.start + 2048)\n\n\t\t# iterate until we hit the enddata marker\n\t\twhile self.filep.tell() < self.enddata - 1:\n\t\t\t# fetch the lengths of the key and value\n\t\t\t(klen, vlen) = unpack('<LL', self.filep.read(8))\n\n\t\t\t# yield the key and value as a tuple\n\t\t\tyield (self.filep.read(klen), self.filep.read(vlen))", "def iteritems(self, multi=False):\n root = self.root\n curr = root[NEXT]\n if multi:\n while curr is not root:\n yield curr[KEY], curr[VALUE]\n curr = curr[NEXT]\n else:\n for key in self.iterkeys():\n yield key, self[key]", "def __next__(self):\n\n # pointer is the current value\n # counter is an item next to pointer\n # take value from pointer position and reduce\n # counter until counter is not 0\n # if counter == 0 move pointer to the next position\n # with value (stride=2)\n if self.counter <= 0:\n # move pointer to the next item\n self.pointer += 2\n try:\n # take counter\n self.counter = self.data[self.pointer + 1]\n except IndexError:\n raise StopIteration\n\n # take value from pointer position and reduce counter\n value = self.data[self.pointer]\n self.counter -= 1\n\n return value", "def next(self):\n nextattr = self.iterobj.next()\n return (nextattr.name, self.attrs[nextattr.name])", "def by_key(item):\n return Line['key', item]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def __kv_pair(line):\n\n splitline = line.split(\"=\")\n\n if len(splitline) <= 1:\n return None, None\n\n key = splitline[0].strip()\n\n val = \"=\".join(splitline[1:]).strip()\n\n return key, val", "def __next__(self):\n for (k, v) in pairs(self._data):\n yield (v, k)", "def get_next(self) -> dict:\n raise NotImplementedError", "def parse_tag_key_value(key_value: str, value_required=True) -> Tuple[str, Any]:\n if not key_value:\n raise ValueError(\"key must be specified.\")\n\n if \"=\" not in key_value:\n if value_required:\n raise 
ValueError(f\"key=value pair expected: '{key_value}'\")\n return (key_value, ANY_VALUE)\n\n key, value = key_value.split(\"=\", 1)\n if not key:\n raise ValueError(f\"key must be specified: '{key_value}'\")\n return (key, parse_tag_value(value))", "def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements", "def __getitem__(self, item):\n if type(item) == str:\n return self.__dict__[item]\n else:\n return self.__dict__", "def _get_tagged_value(self, key):\n return self._tagged_values_dict[key]", "def GetNext(self, item):\r\n\r\n i = item\r\n\r\n # First see if there are any children.\r\n children = i.GetChildren()\r\n if len(children) > 0:\r\n return children[0]\r\n else:\r\n # Try a sibling of this or ancestor instead\r\n p = item\r\n toFind = None\r\n while p and not toFind:\r\n toFind = self.GetNextSibling(p)\r\n p = self.GetItemParent(p)\r\n \r\n return toFind", "def __getitem__(self, item):\n\n # Accessing via key:\n if isinstance(item, str):\n var, rest = self.parse(item)\n\n # All variables are requested (return the object itself)\n if not var:\n return self\n\n if not rest:\n def get_field(var):\n try:\n return self._vars[var]\n except KeyError:\n pass\n\n try:\n return self._groups[var]\n except KeyError:\n raise KeyError(\n \"There is neither a variable nor group named \"\n \"'{}'!\".format(var)\n )\n\n try:\n return get_field(var)\n except KeyError as err:\n main_group = self.attrs.get(\"MAIN_GROUP\", None)\n if main_group is None:\n raise err\n\n return self[main_group][var]\n else:\n if var in self._groups:\n return self._groups[var][rest]\n else:\n raise KeyError(\"'{}' is not a group!\".format(var))\n elif isinstance(item, (tuple, list)) and len(item) \\\n and isinstance(item[0], str) and isinstance(item[1], int):\n return self[item[0]][:, item[1]]\n else:\n # Selecting elements via slicing:\n return self.select(item)", "def get_val(root, items, sep='.', **kwargs):\n assert isinstance(items, (list, six.string_types))\n for key in items if isinstance(items, list) else items.split(sep):\n if root is None:\n return root\n elif isinstance(root, list):\n if '|' not in key:\n raise ValueError(\"Found list but key {0} does not match list \"\n \"filter format 'x|y'\".format(key))\n field, value = key.split('|')\n list_filter = [x for x in root if x.get(field) == value]\n if list_filter:\n root = list_filter[0]\n elif isinstance(root, dict):\n root = root.get(key)\n else:\n root = root.__getattribute__(key)\n return root", "def read_typed_(self, start_key=\"\", end_key=None):\n if end_key == \"\":\n return\n start_key_entry = None\n if start_key:\n if isinstance(start_key, unicode):\n try:\n start_key = str(start_key)\n except:\n pass\n if not isinstance(start_key, str):\n raise 
ValueError(\"start must be <type 'str'> got: %s\" % type(start_key))\n start_key_entry = (start_key, )\n end_key_entry = None\n if end_key:\n if isinstance(end_key, unicode):\n try:\n end_key = str(end_key)\n except:\n pass\n if not isinstance(end_key, str):\n raise ValueError(\"end must be <type 'str'> got: %s\" % type(end_key))\n end_key_entry = (end_key, )\n \n split_entry = []\n for entry in self.read_entries_(start_key_entry, end_key_entry):\n if len(entry) == 2:\n if split_entry:\n self.not_read.append(split_entry[0][0])\n yield entry\n elif len(entry) == 5:\n if entry[1] == 0:\n if split_entry:\n self.not_read.append(split_entry[0][0])\n split_entry = [entry]\n elif (split_entry and split_entry[0][0] == entry[0] and\n len(split_entry) == int(entry[1]) and\n split_entry[0][3] == entry[3]):\n split_entry.append(entry)\n if split_entry and len(split_entry) == int(split_entry[0][2]):\n value = \"\".join([x[4] for x in split_entry])\n yield entry[0], value\n split_entry = []", "def __getitem__(self, item):\n return self._data[item]", "def parse(self):\n\t# tipo de dato del campo data de los slots\n type_zbx_hc_item_t_pointer = gdb.lookup_type(\"zbx_hc_item_t\").pointer()\n\n\t# Lista donde almacenamos los dict de itemid y numero de values\n\tvalue_counter = []\n\n\t# recorremos los slots desde el principio hasta encontrar el primer valor,\n\t# luego usamos los campos \"next\" para ir saltando por los slots con values\n for i in range(0, self.val[\"num_slots\"]):\n if str(self.val[\"slots\"][i]) != \"0x0\":\n slot = self.val[\"slots\"][i]\n\t item = slot[\"data\"].cast(type_zbx_hc_item_t_pointer)\n\n\t item_num_values = self.get_num_values(item)\n\t value_counter.append({\"itemid\": int(item[\"itemid\"]), \"num_values\": item_num_values})\n\n\t# Ordenamos la lista de tuplas segun quien tiene mayor numero de values pendientes\n\tvalue_counter.sort(key=lambda x: x[\"num_values\"], reverse=True)\n\n\t# Dict que devolveremos como respuesta\n\tresp = { \"slots\": int(self.val[\"num_slots\"]), \"num_data\": int(self.val[\"num_data\"]), \"data\": value_counter }\n\n\treturn resp", "def popitem(self, i=-1):\n if not self._sequence:\n raise KeyError('popitem(): dictionary is empty')\n try:\n key = self._sequence[i]\n except IndexError:\n raise IndexError('popitem(): index %s not valid' % i)\n return (key, self.pop(key))", "def _parse_enum(type, item):\n try:\n return type[item]\n except:\n return type(item)", "def get(self, x, item, d):\n if x is None:\n return None\n if len(item) == 0:\n raise Exception(\"item must have length >= 1\")\n char = item[d]\n if char < x.key_char:\n return self.get(x.left, item, d)\n elif char > x.key_char:\n return self.get(x.right, item, d)\n elif d < len(item) - 1:\n return self.get(x.mid, item, d + 1)\n else:\n return x", "def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n 
else:\n return data", "def __getitem__(self, key: Union[Tuple[str, T], str]) -> Union[str, T]:\n if isinstance(key, tuple):\n return self.get(key[0], default=key[1])\n else:\n return self.get(key)", "def get_line_from_item(self, item):\n line = item['ptype']['S']\n i = 0\n\n while i < len(item) - 2:\n line = '{}, {}'.format(line, item['v{}'.format(i)]['S'])\n i = i + 1\n\n return line", "def lookalike_item(parser, token):\n tag_name, scientific_name = token.split_contents()\n return LookalikeItemNode(scientific_name)", "def next_gte_pair(self, key):\n\n node = self.__next_gte_node(self.node, key)\n\n if node == None:\n return None, None\n return (node.key, node.vp)", "def get_next(self):\n try:\n return self.the_input[self.index]\n except IndexError:\n return None", "def __next__(self):\n if self._idx < len(self._rib):\n key_result = self._rib.get_key(self._idx)\n result = self._rib[key_result]\n self._idx += 1\n return result\n\n raise StopIteration", "def __getitem__(self, item):\n u, v = item\n return self.__getitem(u, v)", "def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None", "def __getitem__(self, key):\n ndx = self._findPosition(key)\n assert ndx is not None, 'Invalid map key'\n return self._entryList[ndx].value" ]
[ "0.6376964", "0.6067354", "0.5974745", "0.5974745", "0.59615964", "0.5913587", "0.5901849", "0.589939", "0.5882357", "0.5807142", "0.57675356", "0.5747622", "0.56723076", "0.5654322", "0.5633313", "0.56198114", "0.5533659", "0.5505485", "0.5488771", "0.54648966", "0.5464157", "0.5448432", "0.54443413", "0.54325056", "0.5430618", "0.5405117", "0.53845674", "0.536849", "0.5363494", "0.5363322", "0.53512275", "0.5307928", "0.53050333", "0.528265", "0.526422", "0.5260573", "0.52551305", "0.5249761", "0.5239449", "0.522513", "0.52123183", "0.52103025", "0.52084416", "0.5194829", "0.51873136", "0.51823354", "0.51694953", "0.5169398", "0.5159868", "0.5149753", "0.5149656", "0.5146288", "0.5136602", "0.5135289", "0.5128312", "0.5124405", "0.5124405", "0.5124405", "0.511338", "0.51088476", "0.5106421", "0.510536", "0.5104994", "0.5094982", "0.5094982", "0.5091934", "0.50867087", "0.5063602", "0.5059372", "0.50593466", "0.50573957", "0.5052241", "0.5044135", "0.5044135", "0.5039747", "0.50367874", "0.5032252", "0.50281084", "0.50227696", "0.50214285", "0.501687", "0.5014138", "0.5009801", "0.5008773", "0.49949953", "0.4989682", "0.49812603", "0.49782637", "0.49772608", "0.49766448", "0.496985", "0.4968974", "0.496711", "0.49635327", "0.49608123", "0.4936083", "0.49347812", "0.49281836", "0.4924842", "0.49167305" ]
0.7419916
0
Returns (comment_ws, comment, trail) If there is no comment, comment_ws and comment will simply be empty.
def _parse_comment_trail(self): # type: () -> Tuple[str, str, str] if self.end(): return "", "", "" comment = "" comment_ws = "" self.mark() while True: c = self._current if c == "\n": break elif c == "#": comment_ws = self.extract() self.mark() self.inc() # Skip # # The comment itself while not self.end() and not self._current.is_nl() and self.inc(): pass comment = self.extract() self.mark() break elif c in " \t\r,": self.inc() else: break if self.end(): break while self._current.is_spaces() and self.inc(): pass trail = "" if self._idx != self._marker or self._current.is_ws(): trail = self.extract() return comment_ws, comment, trail
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_from_comment(self, comment):\n comment = re.sub(r'\\s+', ' ', comment.strip().strip(\"/*\").strip())\n if len(comment) == 0:\n return set(), set()\n domain_terms, code_elements = self.extract_from_sentence(comment)\n return domain_terms, code_elements", "def get_comment_data(self, comment):\n # remove double spaces but not triple ones; we use triple spaces to split commenter and parent_commenter\n pattern = '(?<! ) {2}(?! )'\n comment = re.sub(pattern, ' ', comment).strip() # also strip leading and trailing spaces\n\n # get names\n ix = re.search('•', comment).span()[-1]\n names = [x.strip() for x in (comment[:ix]).strip().strip('•').split(' ')]\n try:\n commenter, parent_commenter = names\n except:\n commenter, parent_commenter = names[0], ''\n\n # handle deleted comments\n pattern = 'This comment was deleted.−+−+'\n commenter = commenter.replace(pattern, '').strip()\n \n # get post and upvotes\n comment_upvotes = comment[ix:].split('ago')[-1].strip(' ')\n ix = re.search('(see more)\\w+', comment_upvotes) # redefine ix as index that separates post message from post upvotes\n clean_comment, upvotes = comment_upvotes[:ix.span()[0]], comment_upvotes[ix.span()[0]:].replace('see more', '')\n\n # build dictionary\n d = dict(zip( ['commenter', 'parent_commenter', 'comment', 'upvotes']\n , [commenter, parent_commenter.strip(), clean_comment.strip(), upvotes.strip()]))\n\n return d", "def getComment(self, ind):\r\n if ind >= 0 and ind < len(self.comments):\r\n return self.comments[ind]\r\n return None", "def _get_draft_details(request, comments):\n last_key = None\n output = []\n linecache = {} # Maps (c.patch_key, c.left) to mapping (lineno, line)\n modified_patches = []\n fetch_base_failed = False\n\n for c in comments:\n patch = c.patch_key.get()\n if (patch.key, c.left) != last_key:\n url = request.build_absolute_uri(\n reverse(diff, args=[request.issue.key.id(),\n patch.patchset_key.id(),\n patch.filename]))\n output.append('\\n%s\\nFile %s (%s):' % (url, patch.filename,\n c.left and \"left\" or \"right\"))\n last_key = (patch.key, c.left)\n if patch.no_base_file:\n linecache[last_key] = _patchlines2cache(\n patching.ParsePatchToLines(patch.lines), c.left)\n else:\n try:\n if c.left:\n old_lines = patch.get_content().text.splitlines(True)\n linecache[last_key] = dict(enumerate(old_lines, 1))\n else:\n new_lines = patch.get_patched_content().text.splitlines(True)\n linecache[last_key] = dict(enumerate(new_lines, 1))\n except FetchError:\n linecache[last_key] = _patchlines2cache(\n patching.ParsePatchToLines(patch.lines), c.left)\n fetch_base_failed = True\n context = linecache[last_key].get(c.lineno, '').strip()\n url = request.build_absolute_uri(\n '%s#%scode%d' % (reverse(diff, args=[request.issue.key.id(),\n patch.patchset_key.id(),\n patch.filename]),\n c.left and \"old\" or \"new\",\n c.lineno))\n output.append('\\n%s\\n%s:%d: %s\\n%s' % (url, patch.filename, c.lineno,\n context, c.text.rstrip()))\n if modified_patches:\n ndb.put_multi(modified_patches)\n return '\\n'.join(output)", "def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color 
tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())", "def get_comment_information_by_id(comment_id):\n comment = REDDIT.comment(comment_id)\n print(comment.body)\n print(vars(comment))", "def get_specific_comment_info(comment_id):\n start = time.time()\n\n comment = REDDIT.comment(comment_id)\n\n end = time.time()\n print(end - start)\n return comment.created_utc, comment.permalink, comment.score, comment.link_id", "def comments(self):\n return self.container['comments']", "def getAllComments(self):\r\n return [(ind, comment) for ind, comment in enumerate(self.comments)]", "def get_comment(view, pt):\n\n shell_vars = view.meta_info(\"shellVariables\", pt)\n if not shell_vars:\n return ([], [])\n\n # transform the list of dicts into a single dict\n all_vars = {}\n for v in shell_vars:\n if 'name' in v and 'value' in v:\n all_vars[v['name']] = v['value']\n\n line_comments = []\n block_comments = []\n\n # transform the dict into a single array of valid comments\n suffixes = [\"\"] + [\"_\" + str(i) for i in range(1, 10)]\n for suffix in suffixes:\n start = all_vars.setdefault(\"TM_COMMENT_START\" + suffix)\n end = all_vars.setdefault(\"TM_COMMENT_END\" + suffix)\n\n if start and end is None:\n line_comments.append((start,))\n elif start and end:\n block_comments.append((start, end))\n\n return (line_comments, block_comments)", "def get_comment(self):\n if self.simulation_data is None:\n return self.comment\n else:\n return self.simulation_data.mfdata[self.comment_path]", "def get_initial_comment_list(self, comment_tree):\n comment_tuples = []\n\n if self.link.sticky_comment_id:\n root_level_comments = comment_tree.tree.get(None, [])\n sticky_comment_id = self.link.sticky_comment_id\n if sticky_comment_id in root_level_comments:\n comment_tuples.append(CommentTuple(\n comment_id=sticky_comment_id,\n depth=0,\n parent_id=None,\n num_children=comment_tree.num_children[sticky_comment_id],\n child_ids=comment_tree.tree.get(sticky_comment_id, []),\n ))\n else:\n g.log.warning(\"Non-top-level sticky comment detected on \"\n \"link %r.\", self.link)\n return comment_tuples", "def extract_comments(self, response):\n\n # use the comment_parser package to extract HTML and JS comments\n try:\n html_comments = comment_parser.extract_comments_from_str(response.text, mime=\"text/html\")\n except (UnterminatedCommentError, CP_ParseError):\n html_comments = []\n try:\n js_comments = comment_parser.extract_comments_from_str(response.text, mime=\"application/javascript\")\n except 
(UnterminatedCommentError, CP_ParseError):\n js_comments = []\n\n # put the discovered comments together\n comments = list()\n for comment in html_comments:\n comments.append({\"line\": comment.line_number(), \"comment\": \"<!--\" + comment.text() + \"-->\"})\n for comment in js_comments:\n if comment.is_multiline():\n comments.append({\"line\": comment.line_number(), \"comment\": \"/*\" + comment.text() + \"*/\"})\n else:\n comments.append({\"line\": comment.line_number(), \"comment\": \"//\" + comment.text()})\n\n # store the discovered comments w.r.t. the response's path & query\n if comments:\n parsed_url = urllib.parse.urlparse(response.url)\n if self.config[\"crawl_parameter_links\"].lower() == \"true\":\n self.comments[parsed_url.path + parsed_url.query] = comments\n else:\n self.comments[parsed_url.path] = comments", "def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]", "def comment(self):\n\t\treturn self.comment_", "def extract_time_from_comment(commentList, timecontrol):\n # get number of comments in the current board\n n = len(commentList)\n \n # remove backslashes from comments\n for i, cell in enumerate(commentList):\n commentList[i] = cell.replace(\"\\n\", \" \")\n\n # safe seconds from comments\n timecontrol = timecontrol # int(game.headers[\"TimeControl\"].split(\"+\")[0])\n\n # set start times for white and black player\n start_time_p1, start_time_p2 = timecontrol, timecontrol\n\n # times W\n times_p1 = []\n remaining_times_p1 = []\n # times B\n times_p2 = []\n remaining_times_p2 = []\n\n for i in range(0, n, 2):\n if(commentList[i] == ''):\n print(\"There are no comments for this move of the game.\")\n else:\n # White times\n numbers_in_comment = [int(s) for s in commentList[i] if s.isdigit()]\n string_clock = [str(i) for i in numbers_in_comment]\n clock = \",\".join(string_clock).replace(\",\", \"\")\n clock = clock[-5:]\n\n # if the stamps stands for time left\n time_p1 = start_time_p1 - get_sec(clock)\n times_p1.append(time_p1)\n remaining_times_p1.append(start_time_p1)\n start_time_p1 = start_time_p1 - (start_time_p1 - get_sec(clock))\n \n # Black times\n # print(numbers_in_comment)\n if(i+1 < len(commentList)):\n numbers_in_comment2 = [int(s) for s in commentList[i+1] if s.isdigit()]\n string_clock2 = [str(i) for i in numbers_in_comment2]\n clock2 = \",\".join(string_clock2).replace(\",\", \"\")\n clock2 = clock2[-5:]\n\n # if the stamps stands for time left\n time_p2 = start_time_p2 - get_sec(clock2) \n times_p2.append(time_p2)\n remaining_times_p2.append(start_time_p2)\n start_time_p2 = start_time_p2 - (start_time_p2 - get_sec(clock2))\n \n return times_p1, times_p2, remaining_times_p1, remaining_times_p2", "def _skeleton_to_nml_comments(self):\n\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(\n node=row['id'].values[0],\n content=row['comment'].values[0]\n )\n nml_comments.append(nml_comment)\n\n return nml_comments", "def commentsStartStopLineNmbr(data):\n begin = 0\n end = 0\n i = 0\n\n if data is None or len(data) < 1:\n return None\n\n while i < len(data):\n if \"<table class=\\\"CMheadingBar\\\"\" in data[i]:\n if begin is 0:\n begin = i\n else:\n end = i\n break\n i += 1\n return (int(begin), int(end))", "def 
_get_multi_line_comment(node):\n return _get_comment_from_node(node)", "def comments(self):\n return self._comments", "def comments(self):\n return self._comments", "def comment(self) :\n\t\ttry :\n\t\t\treturn self._comment\n\t\texcept Exception as e:\n\t\t\traise e", "def line_comments(self, tail_comment = None):\n tail_comment = tail_comment or self.tail_comment()\n return [comment for comment in self.comments if comment != tail_comment]", "def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments", "def _get_comment_order(self):\n\n comment_tuples = CommentOrdererBase.get_comment_order(self)\n if not comment_tuples:\n return comment_tuples\n elif isinstance(comment_tuples[-1], MissingChildrenTuple):\n missing_children_tuple = comment_tuples.pop()\n else:\n missing_children_tuple = None\n\n special_responder_ids = self.link.responder_ids\n\n # unfortunately we need to look up all the Comments for QA\n comment_ids = {ct.comment_id for ct in comment_tuples}\n comments_by_id = Comment._byID(comment_ids, data=True)\n\n # figure out which comments will be kept (all others are discarded)\n kept_comment_ids = set()\n for comment_tuple in comment_tuples:\n if comment_tuple.depth == 0:\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n comment = comments_by_id[comment_tuple.comment_id]\n parent = comments_by_id[comment.parent_id] if comment.parent_id else None\n\n if comment.author_id in special_responder_ids:\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n if parent and parent.author_id in special_responder_ids:\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n if hasattr(comment, \"distinguished\") and comment.distinguished != \"no\":\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n # add all ancestors to kept_comment_ids\n for comment_id in sorted(kept_comment_ids):\n # sort the comments so we start with the most root level comments\n comment = comments_by_id[comment_id]\n parent_id = comment.parent_id\n\n counter = 0\n while (parent_id and\n parent_id not in kept_comment_ids and\n counter < g.max_comment_parent_walk):\n kept_comment_ids.add(parent_id)\n counter += 1\n\n comment = comments_by_id[parent_id]\n parent_id = comment.parent_id\n\n # remove all comment tuples that aren't in kept_comment_ids\n comment_tuples = [comment_tuple for comment_tuple in comment_tuples\n if comment_tuple.comment_id in kept_comment_ids\n ]\n\n if missing_children_tuple:\n comment_tuples.append(missing_children_tuple)\n\n return comment_tuples", "def _parse_comments(self, tokens: TokenIterator):\n metadata = {}\n while tokens.peek().type == 'COMMENT':\n comment = tokens.next().text\n while comment:\n comment, found, meta = comment.rpartition('::')\n if found:\n key, _, value = meta.partition(' ')\n metadata[key] = value.rstrip()\n return metadata", "def comment(self):\r\n return self._comment if self._comment is not None else \"\"", "def get_extra_comments():\n try:\n yml_iter = cfg.yml_config[\"comments\"]\n except:\n # Probably no \"comments\" section in the yml-file.\n return \"\\n\"\n\n return (\"\\n\".join(yml_iter) + \"\\n\") if yml_iter is not None else \"\\n\"", "def trim_comment(line):\n if ';' not in line:\n return (line, None)\n\n comment_start = line.index(';')\n before_comment = line[:comment_start]\n spaces_before_comment = len(before_comment) - len(before_comment.rstrip())\n 
comment = line[comment_start:]\n return (before_comment.rstrip(), spaces_before_comment * ' ' + comment)", "def get_comments(qint,conn):\n\n comms = ('SELECT DISTINCT ip.value '\n 'FROM interaction i, interactionprop ip, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.type_id = cvt.cvterm_id '\n 'AND cvt.is_obsolete=0 AND cvt.name != \\'comments on source\\' '\n 'AND cvt.name != \\'internalnotes\\' AND i.uniquename = %s')\n comnts = connect(comms, qint, conn)\n return(comnts)", "def get_comments(self):\n raise NotImplementedError", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\r\n return c.Comments(self)", "def get_comments(self):\n\t\tself.comments = graph.get_connections(post['id'], 'comments')", "def get_comment_format():\n commentstring = vim.eval(\"&commentstring\")\n if commentstring.endswith(\"%s\"):\n c = commentstring[:-2]\n return (c.rstrip(), c.rstrip(), c.rstrip(), \"\")\n comments = _parse_comments(vim.eval(\"&comments\"))\n for c in comments:\n if c[0] == \"SINGLE_CHAR\":\n return c[1:]\n return comments[0][1:]", "def getComment(self, n = None):\n \n if n is None:\n return self._comments\n else:\n return self._comments[n]", "def read_comment(comment):\n comment_dict = {}\n\n debug(\"parse tab in comment.\")\n comment_dict_from_tab, comment = parse_tab_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_tab)\n comment_dict.update(comment_dict_from_tab)\n\n debug(\"parse space in comment.\")\n comment_dict_from_space, comment = parse_space_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_space)\n comment_dict.update(comment_dict_from_space)\n\n debug(\"parse keyword in comment.\")\n comment_dict_from_keyword, comment = parse_keyword_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_keyword)\n comment_dict.update(comment_dict_from_keyword)\n # keyword based separation.\n return comment_dict", "def _get_review_comments_body(\n self, pull_request_number: int) -> List[Tuple[str, str]]:\n review_comments = get_pull_request_review_comments(\n self._repo_name, pull_request_number, self._auth)\n if not review_comments:\n return []\n review_comments_msg = []\n for comment in review_comments:\n review_comments_msg.append((comment['path'], comment['body']))\n return review_comments_msg", "def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)", "def check_previous_comments(pr_id):\n comments = get_previous_pr_comments(pr_id)\n my_comments = [c for c in comments if c.author == 'github-actions']\n\n if len(my_comments) == 0:\n return [], '', None\n else:\n last_messages = {}\n final_message = my_comments[0].body\n final_date = my_comments[0].date\n\n for c in my_comments:\n if c.body not in last_messages:\n last_messages[c.body] = c.date\n elif last_messages[c.body] < c.date:\n last_messages[c.body] = c.date\n\n if final_date < c.date:\n final_message = c.body\n final_date = c.date\n\n return last_messages, final_message, final_date", "def getcomments(object):\r\n try:\r\n lines, lnum = findsource(object)\r\n except (IOError, TypeError):\r\n return None\r\n\r\n if ismodule(object):\r\n # Look for a comment block at the top of the file.\r\n start = 0\r\n if lines and lines[0][:2] == '#!': start = 1\r\n while start < len(lines) and string.strip(lines[start]) in ('', '#'):\r\n start = start + 
1\r\n if start < len(lines) and lines[start][:1] == '#':\r\n comments = []\r\n end = start\r\n while end < len(lines) and lines[end][:1] == '#':\r\n comments.append(string.expandtabs(lines[end]))\r\n end = end + 1\r\n return string.join(comments, '')\r\n\r\n # Look for a preceding block of comments at the same indentation.\r\n elif lnum > 0:\r\n indent = indentsize(lines[lnum])\r\n end = lnum - 1\r\n if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \\\r\n indentsize(lines[end]) == indent:\r\n comments = [string.lstrip(string.expandtabs(lines[end]))]\r\n if end > 0:\r\n end = end - 1\r\n comment = string.lstrip(string.expandtabs(lines[end]))\r\n while comment[:1] == '#' and indentsize(lines[end]) == indent:\r\n comments[:0] = [comment]\r\n end = end - 1\r\n if end < 0: break\r\n comment = string.lstrip(string.expandtabs(lines[end]))\r\n while comments and string.strip(comments[0]) == '#':\r\n comments[:1] = []\r\n while comments and string.strip(comments[-1]) == '#':\r\n comments[-1:] = []\r\n return string.join(comments, '')", "def parse_comments_html(advertise: Dict[str, Any]) -> Optional[List[str]]:\n if \"comments_html\" in advertise.keys():\n\n filtred_comments: str = advertise[\"comments_html\"][200::]\n\n tmp: List[str] = re.split(\"[ \\n\\t]{2,}\", filtred_comments)\n if '' in tmp:\n tmp.remove('')\n\n # Breaking comments\n master: List[List[str]] = []\n tmp_vec: List[str] = []\n for line in tmp:\n\n if re.search(\"de \\d{4,}\", line): # matches 'de 2018' that signals the end of comment\n master.append(tmp_vec)\n tmp_vec = []\n else:\n tmp_vec.append(line)\n\n # Cleaning comments\n for comment in master:\n if \"...\" in comment:\n comment.remove(\"...\")\n if \"O usuário contratou o serviço em\" in comment:\n comment.remove(\"O usuário contratou o serviço em\")\n\n return [\" \".join(m) for m in master]", "def make_parsed_comments(self):\n if not hasattr(self, 'separated_comments'):\n self.separated_comments = self.separate_comments()\n \n # build comments list of dictionaries, one dictionary for each article\n self.comments = []\n for self.separated_comment in self.separated_comments:\n try:\n comment_data = self.get_comment_data(self.separated_comment)\n self.comments.append(comment_data)\n except Exception as e:\n pass\n return self.comments", "def comments(self) -> list:\n return self._node[\"app_data\"][\"ui_data\"].get(\"comments\", [])", "def get_comments(self):\n\t\treturn self._client.get_comments(self)", "def get_description_from_comments(act_comments):\n desc_re = re.compile(r\"\\*[ ]+(?!@)(.*)\")\n return \"\\n\".join(desc_re.findall(act_comments))", "def _dump_comment(comment: List[str]) -> List[str]:\n return [\"/**\"] + comment + [\"*/\"]", "def invoice_history_comments(invoice):\n history = Invoice.history.filter(id=invoice.id)\n record = history.last()\n comments = []\n while record.next_record:\n next_record = record.next_record\n delta = next_record.diff_against(record)\n message = _('Changed: \\n')\n for change in delta.changes:\n if change.field not in AVOID_COMMENTS:\n if change.field != 'status':\n old_value = change.old\n new_value = change.new\n else:\n old_value = get_status_display(change.old)\n new_value = get_status_display(change.new)\n message += _('{} from {} to {}\\n').format(\n Invoice._meta.get_field(change.field).verbose_name,\n old_value,\n new_value,\n )\n if next_record.history_change_reason:\n message += gettext('\\nChange reason:\\n')\n message += next_record.history_change_reason\n if message != _('Changed: \\n'):\n comment = 
Comment(\n user=next_record.history_user,\n invoice=next_record.instance,\n message=message,\n comment_date_received=next_record.history_date\n )\n comments.append(comment)\n record = next_record\n return comments", "def get_semeval_content_with_relcomments(element):\n if element.tag == 'OrgQuestion':\n return get_orgquestion_content(element)\n\n if element.tag == 'Thread':\n return ' '.join(chain(\n [get_relquestion_content(element.find('./RelQuestion'))],\n [get_relcomment_content(comment)\n for comment in element.findall('./RelComment')]\n ))\n\n if element.tag == 'RelComment':\n return get_relcomment_content(element)\n\n return None", "def comment_for_run (ins, exp, runnum) :\n return dict_of_recs_for_run(ins, exp, runnum)['comment']", "def get_indicators_and_clean_comments(df):\n # Count number of \\n\n df[\"ant_slash_n\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\n\", x))\n # Get length in words and characters\n df[\"raw_word_len\"] = df[\"comment_text\"].apply(lambda x: len(x.split()))\n df[\"raw_char_len\"] = df[\"comment_text\"].apply(lambda x: len(x))\n # Check number of upper case, if you're angry you may write in upper case\n df[\"nb_upper\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[A-Z]\", x))\n # Number of F words - f..k contains folk, fork,\n df[\"nb_fk\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[Ff]\\S{2}[Kk]\", x))\n # Number of S word\n df[\"nb_sk\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[Ss]\\S{2}[Kk]\", x))\n # Number of D words\n df[\"nb_dk\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[dD]ick\", x))\n # Number of occurence of You, insulting someone usually needs someone called : you\n df[\"nb_you\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\W[Yy]ou\\W\", x))\n # Just to check you really refered to my mother ;-)\n df[\"nb_mother\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\Wmother\\W\", x))\n # Just checking for toxic 19th century vocabulary\n df[\"nb_ng\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\Wnigger\\W\", x))\n # Some Sentences start with a <:> so it may help\n df[\"start_with_columns\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"^\\:+\", x))\n # Check for time stamp\n df[\"has_timestamp\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\d{2}|:\\d{2}\", x))\n # Check for dates 18:44, 8 December 2010\n df[\"has_date_long\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\D\\d{2}:\\d{2}, \\d{1,2} \\w+ \\d{4}\", x))\n # Check for date short 8 December 2010\n df[\"has_date_short\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\D\\d{1,2} \\w+ \\d{4}\", x))\n # Check for http links\n df[\"has_http\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"http[s]{0,1}://\\S+\", x))\n # check for mail\n df[\"has_mail\"] = df[\"comment_text\"].apply(\n lambda x: count_regexp_occ(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+', x)\n )\n # Looking for words surrounded by == word == or \"\"\"\" word \"\"\"\"\n df[\"has_emphasize_equal\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\={2}.+\\={2}\", x))\n df[\"has_emphasize_quotes\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\\"{4}\\S+\\\"{4}\", x))\n\n # Now clean comments\n df[\"clean_comment\"] = df[\"comment_text\"].apply(lambda x: prepare_for_char_n_gram(x))\n\n # Get the new length in words and characters\n df[\"clean_word_len\"] = df[\"clean_comment\"].apply(lambda x: 
len(x.split()))\n df[\"clean_char_len\"] = df[\"clean_comment\"].apply(lambda x: len(x))\n # Number of different characters used in a comment\n # Using the f word only will reduce the number of letters required in the comment\n df[\"clean_chars\"] = df[\"clean_comment\"].apply(lambda x: len(set(x)))\n df[\"clean_chars_ratio\"] = df[\"clean_comment\"].apply(lambda x: len(set(x))) / df[\"clean_comment\"].apply(\n lambda x: 1 + min(99, len(x)))", "def comment():", "def get_previous_pr_comments(pr_id):\n relevant_comments = get_status_json(pr_id, 'comments')\n\n comments = [Comment(c[\"body\"], datetime.fromisoformat(c['createdAt'].strip('Z')), c['author']['login'])\n for c in relevant_comments]\n return comments", "def test_get_specific_comment_info():\n a, b, c, d = get_specific_comment_info('g99c7c0')\n print('time created:', a, 'type:', type(a))\n print('permalink:', b, 'type:', type(b))\n print('karma score:', c, 'type:', type(c))\n print('submission id:', d, 'type:', type(d))", "def extract_comments(comments_file, output_filename=direc+\"/comments.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting comments from \" + comments_file + \"...\")\r\n comments_dict = {}\r\n with open(output_filename, \"w\", encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(comments_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n if child.attrib['PostId'] not in comments_dict:\r\n comments_dict[child.attrib['PostId']] = []\r\n comments_dict[child.attrib['PostId']].append(child.attrib['Id'])\r\n clean_comment = clean_markdown(child.attrib['Text'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['PostId'] + \"\\t\" + clean_comment + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting comments from \" + comments_file + \".\\n\")\r\n return comments_dict", "def get_initial_comment_list(self, comment_tree):\n return []", "def parse_space_in_comment(comment):\n max_spaces_dict = {}\n for line in comment:\n if (not line.strip()) or line.find(\" \") == -1:\n # empty line or line do not have spaces in it.\n continue\n max_spaces_dict[line] = max(len(list(v)) for k, v in groupby(line) if k == \" \")\n\n sep = [(line.index(\" \" * count) + count) for line, count in max_spaces_dict.items()]\n sep.sort()\n count_dict = {len(list(v)):k for k, v in groupby(sep)}\n\n if max(count_dict.keys()) < 3:\n return {}, comment\n\n comment_dict = {}\n # more than 3 lines following the same pattern, extract from it.\n sep_position = count_dict[max(count_dict.keys())] - 1\n debug(\"found boundary: %s\" % sep_position)\n\n def line_match_pattern(line, position, prev_line=None, next_line=None, recursive=True):\n \"\"\"\n for a line to match a pattern, its next line or its prev line must\n also match the pattern. Notice that the function would call itself\n to see if its next/prev line matches the pattern. So we used a flag\n to stop it from going deeper into the loop.\n \"\"\"\n if line.strip() and len(line) <= position + 1:\n return False\n if not (line[position] == \" \" and line[position+1] != \" \"):\n # The line itself must match the pattern.\n return False\n if (prev_line is None) and (next_line is None) and recursive:\n print(\"##### Bad way to call this function. 
####\")\n return False\n\n if not recursive:\n # If we do not go deeper, then the current line just match the pattern.\n return True\n\n if prev_line and prev_line.strip() and not (line_match_pattern(prev_line, position, recursive=False)):\n return False\n\n if next_line and next_line.strip() and not (line_match_pattern(next_line, position, recursive=False)):\n return False\n\n return True\n\n comment_copy = copy(comment)\n for index, line in enumerate(comment_copy):\n if (not line.strip()) or line.find(\" \") == -1 or len(line) < sep_position:\n # empty line, or line has no space, or line to short.\n continue\n if index == 0:\n if line_match_pattern(line, sep_position, next_line=comment_copy[1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"First line, but it does not match\")\n continue\n elif index == len(comment_copy)-1:\n if line_match_pattern(line, sep_position, prev_line=comment_copy[-1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"last line, but it does not match\")\n continue\n elif line_match_pattern(line, sep_position, prev_line=comment_copy[index-1], next_line=comment_copy[index+1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n return comment_dict, comment", "def comment_tokens(self):\n return self._comment_tokens", "def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1", "def get_path_to_comment(cls, comment, context, comment_tree):\n\n if comment._id not in comment_tree.cids:\n # the comment isn't in the tree\n raise InconsistentCommentTreeError\n\n comment_id = comment._id\n path = []\n while comment_id and len(path) <= context:\n path.append(comment_id)\n try:\n comment_id = comment_tree.parents[comment_id]\n except KeyError:\n # the comment's parent is missing from the tree. 
this might\n # just mean that the child was added to the tree first and\n # the tree will be correct when the parent is added.\n raise InconsistentCommentTreeError\n\n # reverse the list so the first element is the most root level comment\n path.reverse()\n return path", "def comments(self):\r\n return RepoCommitsComments(self)", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n # Happens when there is no documentation comment in the source file for the\n # item.\n spelling = comment.spelling\n if spelling is None:\n return \"\"\n\n # Comments from clang start at the '/*' portion, but if the comment itself\n # is indented subsequent lines will have too much indent.\n # Transform::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n #\n # into::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n indent = \" \" * (comment.extent.start.column - 1)\n indented_comment = indent + spelling\n dedented_comment = textwrap.dedent(indented_comment)\n\n # Notes on the regex here.\n # Option 1 '\\s?\\*/?'\n # This piece will match comment lines that start with '*' or ' *'.\n # This will also match a trailing '*/' for the end of a comment\n #\n # Option 2 '^/\\*+<?'\n # This will match the start of a comment '/*' and consume any\n # subsequent '*'. This is also meant to catch '/**<' for trailing comments.\n #\n # Option 3 '\\*+/'\n # Matches any and all '*' up to the end of the comment string.\n contents = re.sub(\n r\"^\\s?\\*/?|^/\\*+<?|\\*+/\",\n lambda x: len(x.group(0)) * \" \",\n dedented_comment,\n flags=re.MULTILINE,\n )\n\n contents = textwrap.dedent(contents)\n\n # there may still be left over newlines so only strip those, but leave any\n # whitespaces.\n contents = contents.strip(\"\\n\")\n\n return contents", "def _comments(self) -> List[\"BaseSegment\"]:\n return [seg for seg in self.segments if seg.is_type(\"comment\")]", "def linear(comments):\r\n\r\n return {'root': comments}", "def separate_comments(self):\n if not hasattr(self, 'cleaned_html'):\n self.cleaned_html = self.clean_html()\n \n self.separated_comments = self.cleaned_html.split(self.post_splitter)\n return self.separated_comments", "def getTopComments(self):\n return self.topComments", "def comments(self):\r\n return RepoCommitsComments(self.parent)", "def process_comment(self, data):\r\n if not self.is_suppress:\r\n return [data]", "def comments(self):\r\n return comments.Comments(self)", "def comments(self):\r\n return Comments(self)", "def comments(self):\r\n return Comments(self)", "def comments(self):\r\n return Comments(self)", "def get_return_from_comment(act_comments):\n returns_re = re.compile(r\"[ ]+\\*[ ]+@return[ ]+\\{(.*)\\}.*\")\n returns_m = returns_re.search(act_comments)\n if returns_m:\n return returns_m.group(1)\n else:\n return None", "def _extract_comment_values(line, status, comment, sources):\n \n line = line.strip( )\n\n if len(line) > 1:\n if line[1] == ':':\n sources = re.split(',[\\s]*', line[2:].strip( ))\n elif line[1] == '.':\n comment = line[2:].strip( )\n elif line[1] == ',':\n flags = re.split(',[\\s]*', line[2:].strip( ))\n if not 'fuzzy' in flags:\n status = store.STATUS_COMPLETE\n \n return status, comment, sources", "def get_comments(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/comments\".format(project.id,\n story.id)\n params = {\"fields\": Comment.FIELDS}\n comments = self._request(\"get\", resource, params=params)\n\n for comment in comments:\n ret_val.append(Comment(comment))\n\n return ret_val", 
"def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def comments(self):\n return comments.Comments(self)", "def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note", "def _get_comment(self, cell: NotebookNode, resources: ResourcesDict) -> None:\n\n # retrieve or create the comment object from the database\n comment = self.gradebook.find_comment(\n cell.metadata['nbgrader']['grade_id'],\n self.notebook_id,\n self.assignment_id,\n self.student_id)\n\n # save it in the notebook\n cell.metadata.nbgrader['comment'] = comment.comment", "def _parse_comments(s):\n i = iter(s.split(\",\"))\n\n rv = []\n try:\n while True:\n # get the flags and text of a comment part\n flags, text = next(i).split(':', 1)\n\n if len(flags) == 0:\n rv.append(('OTHER', text, text, text, \"\"))\n # parse 3-part comment, but ignore those with O flag\n elif 's' in flags and 'O' not in flags:\n ctriple = [\"TRIPLE\"]\n indent = \"\"\n\n if flags[-1] in string.digits:\n indent = \" \" * int(flags[-1])\n ctriple.append(text)\n\n flags, text = next(i).split(':', 1)\n assert flags[0] == 'm'\n ctriple.append(text)\n\n flags, text = next(i).split(':', 1)\n assert flags[0] == 'e'\n ctriple.append(text)\n ctriple.append(indent)\n\n rv.append(ctriple)\n elif 'b' in flags:\n if len(text) == 1:\n rv.insert(0, (\"SINGLE_CHAR\", text, text, text, \"\"))\n except StopIteration:\n return rv", "def comment_content(c):\n content = str(c)[4:-3]\n return content.strip()", "def dedent(comment):\n commentLines = comment.split('\\n')\n if len(commentLines) < 2:\n cleaned = list(map(str.lstrip, commentLines))\n else:\n spc = 0\n for char in commentLines[1]:\n if char in string.whitespace:\n spc = spc + 1\n else:\n break\n #now check other lines\n cleaned = []\n for line in commentLines:\n for i in range(min(len(line),spc)):\n if line[0] in string.whitespace:\n line = line[1:]\n cleaned.append(line)\n return '\\n'.join(cleaned)", "def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)", "def getCellComment(self, row, column):\n\n\t\t\t\tcell = self.getCell(row = row, column = column)\n\n\t\t\t\treturn cell.comment.content", "def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 
'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments", "def _get_draft_comments(request, issue, preview=False):\n comments = []\n tbd = []\n # XXX Should request all drafts for this issue once, now we can.\n for patchset in issue.patchsets:\n ps_comments = list(models.Comment.query(\n models.Comment.author == request.user,\n models.Comment.draft == True, ancestor=patchset.key))\n if ps_comments:\n patches = dict((p.key, p) for p in patchset.patches)\n for p in patches.itervalues():\n p.patchset_key = patchset.key\n for c in ps_comments:\n c.draft = False\n # Get the patch key value without loading the patch entity.\n # NOTE: Unlike the old version of this code, this is the\n # recommended and documented way to do this!\n pkey = c.patch_key\n if pkey in patches:\n patch = patches[pkey]\n c.patch_key = patch.key\n if not preview:\n tbd.extend(ps_comments)\n patchset.update_comment_count(len(ps_comments))\n tbd.append(patchset)\n ps_comments.sort(key=lambda c: (c.patch_key.get().filename, not c.left,\n c.lineno, c.date))\n comments += ps_comments\n\n return tbd, comments", "def get_details_of_line_being_tested(self):\n\n if not self.header:\n return (None, None)\n\n lines = []\n pos = self.view.sel()[0].begin()\n first_line = True\n while pos >= 0:\n details = self.get_details_of_test_assertion_line(pos)\n pos = details.line_region.begin() - 1\n if details.assertion_colrange:\n lines.append(details)\n elif not first_line or not details.comment_marker_match:\n break\n elif details.comment_marker_match:\n lines.append(details)\n first_line = False\n\n return (lines, details.line_region)", "def make_comment(self, input, start, end, elements):\n return elements[1].text.strip('{}')", "def extract_comment_py():\n debug(\"extract comment from a python script.\")\n for line in CURRENT_BUFFER[:3]:\n if re.search(r\"coding[:=]\\s*([-\\w.]+)\", line):\n pattern = re.compile(r\"coding[:=]\\s*(?P<encoding>[-\\w.]+)\")\n globals()['ENCODING'] = pattern.search(line).group('encoding')\n debug(\"found encoding: %s\" % globals()['ENCODING'])\n\n lines = list(CURRENT_BUFFER)\n for (i, iline) in enumerate(lines[:10]):\n # find \"\"\" or ''' in the first few lines.\n if '\"\"\"' in iline or \"'''\" in iline:\n # find the end of it.\n breaker = '\"\"\"' if '\"\"\"' in iline else \"'''\"\n for j, jline in enumerate(lines[i+1:]):\n if breaker in jline:\n # found it, format the comment a little bit.\n if j == 0:\n # in the same line, this is a one line comment.\n return [jline[jline.index(breaker)+3:jline.rindex(breaker)]]\n else:\n lines[i] = lines[i][lines[i].index(breaker)+3:]\n lines[i+j+1] = lines[i+j+1][:lines[i+j+1].rindex(breaker)]\n return lines[i:i+j+1]\n else:\n # end of the comment is not found.\n return\n else:\n # comment might start with #\n return extract_comment_sh(python_style=True)", "def get_comment_list(parser, token):\n return CommentListNode.handle_token(parser, token)", "def comments(self):\r\n return GistComments(self)", "def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good 
response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']", "def getHTMLComments(self, text):\n return self.doSpecial(text, '<!--', '-->', self.fParseHTMLComments)", "def messages(self):\n if self.rank < self.midpoint:\n return (self.midpoint, self.right)\n else:\n return (self.left, self.midpoint)", "def _get_comment_map(self):\r\n def _visit(obj):\r\n res = []\r\n for child in obj.get('children', []):\r\n res.append((child['id'], child))\r\n if 'children' in child:\r\n res += _visit(child)\r\n return res\r\n return dict(_visit(self.thread))", "def parse_comments(submission):\n comments = []\n submission.replace_more_comments()\n for c in praw.helpers.flatten_tree(submission.comments):\n comment_dict = c.__dict__\n\n # NOTE: author is a special case (and must be present)\n author = c.author.name if hasattr(c.author, \"name\") else None\n if not author:\n continue\n\n comment = {\n \"submission_id\": submission.id,\n \"author\": author\n }\n del comment_dict[\"author\"] # no longer needed\n for k in _model_columns(Comment):\n if k in comment_dict:\n comment[k] = comment_dict[k]\n comments.append(comment)\n\n return comments", "def separate_square_comments(self, data_str):\n data_buff = []\n lsq, rsq = data_str.find('['), -1\n while lsq > -1:\n if lsq != 0:\n data_buff.append(data_str[rsq+1:lsq])\n rsq = data_str.find(']', lsq+1)\n sub_lsq = data_str.find('[', lsq+1)\n while -1 < sub_lsq < rsq:\n sub_lsq = data_str.find('[', sub_lsq+1)\n rsq = data_str.find(']', rsq+1)\n if rsq == -1:\n raise PhyloValueError(\"Error: mismatched square brackets: '{}'. 
Cannot extract comments.\".format(data_str))\n data_buff.append(data_str[lsq:rsq+1])\n lsq = data_str.find('[', rsq+1)\n if rsq < len(data_str) - 1:\n data_buff.append(data_str[rsq+1 :])\n return data_buff", "def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n item['content'] += str(p)\n\n data.append(item)\n\n return data", "def _readComments(self): \n self.NSCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readSpecialComments()\n self.NNCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readNormalComments()" ]
[ "0.5537778", "0.55255157", "0.55058956", "0.54286265", "0.53183115", "0.53128594", "0.5308172", "0.5303508", "0.53024226", "0.52887475", "0.52451444", "0.52160954", "0.5189738", "0.51862514", "0.51771724", "0.5174911", "0.5149944", "0.51467705", "0.51425964", "0.5140602", "0.5140602", "0.5140079", "0.50796854", "0.50387657", "0.5035902", "0.5030503", "0.50277376", "0.49910012", "0.49895298", "0.49829447", "0.49798602", "0.4968231", "0.4968231", "0.4968231", "0.49596587", "0.49579656", "0.49514294", "0.492867", "0.49203882", "0.4919887", "0.49158517", "0.4915251", "0.4904318", "0.49035138", "0.4896703", "0.4880768", "0.48704076", "0.48590583", "0.48574406", "0.48561954", "0.48541182", "0.48508194", "0.48437768", "0.4843289", "0.48354903", "0.48285308", "0.48207346", "0.48128226", "0.48117894", "0.4807994", "0.4797137", "0.4777587", "0.476007", "0.47547752", "0.4741363", "0.47406107", "0.47383425", "0.4729809", "0.47247812", "0.4712842", "0.47094294", "0.47094294", "0.47094294", "0.47070435", "0.47043264", "0.4699931", "0.46870193", "0.46688768", "0.4664379", "0.46493375", "0.46420822", "0.46210605", "0.4616289", "0.46132913", "0.45938098", "0.45904222", "0.45890927", "0.4581744", "0.45741302", "0.45728633", "0.4568615", "0.45636195", "0.45561185", "0.45534864", "0.4553083", "0.45509055", "0.4550268", "0.45473", "0.45470297", "0.45430326" ]
0.7539042
0
Parses a Key at the current position; WS before the key must be exhausted first at the callsite.
def _parse_key(self): # type: () -> Key if self._current in "\"'": return self._parse_quoted_key() else: return self._parse_bare_key()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_bare_key(self): # type: () -> Key\n self.mark()\n while self._current.is_bare_key_char() and self.inc():\n pass\n\n key = self.extract()\n\n return Key(key, sep=\"\")", "def _parse_quoted_key(self): # type: () -> Key\n quote_style = self._current\n key_type = None\n for t in KeyType:\n if t.value == quote_style:\n key_type = t\n break\n\n if key_type is None:\n raise RuntimeError(\"Should not have entered _parse_quoted_key()\")\n\n self.inc()\n self.mark()\n\n while self._current != quote_style and self.inc():\n pass\n\n key = self.extract()\n self.inc()\n\n return Key(key, key_type, \"\")", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def parse(self):\r\n for key, value in KLVParser(self.value, self.key_length):\r\n try:\r\n self.items[key] = self.parsers[key](value)\r\n except Exception:\r\n None", "def _parse_item(self): # type: () -> Optional[Tuple[Optional[Key], Item]]\n self.mark()\n saved_idx = self._save_idx()\n\n while True:\n c = self._current\n if c == \"\\n\":\n # Found a newline; Return all whitespace found up to this point.\n self.inc()\n\n return (None, Whitespace(self.extract()))\n elif c in \" \\t\\r\":\n # Skip whitespace.\n if not self.inc():\n return (None, Whitespace(self.extract()))\n elif c == \"#\":\n # Found a comment, parse it\n indent = self.extract()\n cws, comment, trail = self._parse_comment_trail()\n\n return (None, Comment(Trivia(indent, cws, comment, trail)))\n elif c == \"[\":\n # Found a table, delegate to the calling function.\n return\n else:\n # Begining of a KV pair.\n # Return to beginning of whitespace so it gets included\n # as indentation for the KV about to be parsed.\n self._restore_idx(*saved_idx)\n key, value = self._parse_key_value(True)\n\n return key, value", "def read_from(cls, s, taproot: bool = False):\n first = s.read(1)\n origin = None\n if first == b\"[\":\n prefix, char = read_until(s, b\"]\")\n if char != b\"]\":\n raise ArgumentError(\"Invalid key - missing ]\")\n origin = KeyOrigin.from_string(prefix.decode())\n else:\n s.seek(-1, 1)\n k, char = read_until(s, b\",)/\")\n der = b\"\"\n # there is a following derivation\n if char == b\"/\":\n der, char = read_until(s, b\"<{,)\")\n # legacy branches: {a,b,c...}\n if char == b\"{\":\n der += b\"{\"\n branch, char = read_until(s, b\"}\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing }\")\n der += branch + b\"}\"\n rest, char = read_until(s, b\",)\")\n der += rest\n # multipart descriptor: <a;b;c;...>\n elif char == b\"<\":\n der += b\"<\"\n branch, char = read_until(s, b\">\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing >\")\n der += branch + b\">\"\n rest, char = read_until(s, b\",)\")\n der += rest\n if char is not None:\n s.seek(-1, 1)\n # parse key\n k, xonly_repr = cls.parse_key(k, taproot)\n # parse derivation\n allow_hardened = isinstance(k, bip32.HDKey) and isinstance(k.key, ec.PrivateKey)\n derivation = AllowedDerivation.from_string(\n der.decode(), allow_hardened=allow_hardened\n )\n return cls(k, origin, derivation, taproot, xonly_repr)", "def kvlm_parse(self, raw, start=0, dct=None):\n if not 
dct:\n dct = collections.OrderedDict()\n\n # We search for the next space and the next newline.\n spc = raw.find(b\" \", start)\n nl = raw.find(b\"\\n\", start)\n\n # If space appears before newline, we have a keyword.\n\n # Base case\n # =========\n # If newline appears first (or there's no space at all, in which\n # case find returns -1), we assume a blank line. A blank line\n # means the remainder of the data is the message.\n if (spc < 0) or (nl < spc):\n assert nl == start\n dct[b\"\"] = raw[start + 1 :]\n return dct\n\n # Recursive case\n # ==============\n # we read a key-value pair and recurse for the next.\n key = raw[start:spc]\n\n # Find the end of the value. Continuation lines begin with a\n # space, so we loop until we find a \"\\n\" not followed by a space.\n end = start\n while True:\n end = raw.find(b\"\\n\", end + 1)\n if raw[end + 1] != ord(\" \"):\n break\n\n # Grab the value\n # Also, drop the leading space on continuation lines\n value = raw[spc + 1 : end].replace(b\"\\n \", b\"\\n\")\n\n # Don't overwrite existing data contents\n if key in dct:\n if type(dct[key]) == list:\n dct[key].append(value)\n else:\n dct[key] = [dct[key], value]\n else:\n dct[key] = value\n\n return self.kvlm_parse(raw, start=end + 1, dct=dct)", "def key_stream(src, tokenizer=tokenize_mapper_json):\n this_streams_key = None\n while src.has_next():\n next_val = src.peek()\n key, value = tokenizer(next_val)\n if this_streams_key is None:\n this_streams_key = key\n if this_streams_key == key:\n yield tokenizer(src.next())[1]\n else:\n raise StopIteration()\n raise StopIteration()", "def _determine_key(self, findend = False):\n if self.Key != None:\n name = None\n for n in self.Key:\n if n == None:\n return n\n # If the letter's valid, then deal with it\n if n == 0:\n if findend:\n return n.obj_offset + n.size()\n name = self.obj_vm.read(self.Key.obj_offset, n.obj_offset - self.Key.obj_offset).decode(\"utf16\", \"ignore\").encode(\"ascii\", 'backslashreplace')\n break\n return name\n return self.Key", "def _ParseRecord(self, parser_mediator, key, structure):\n if key == 'record_start':\n self._ParseRecordStart(structure)\n\n elif key == 'record_body':\n self._ParseRecordBody(structure)\n\n elif key == 'record_end':\n self._ParseRecordEnd(parser_mediator, structure)", "def _get_from_back(self, key):\n\n raise KeyError", "def _yield_keys(self, key):\n if self._len_keys > 1:\n keys = self._validate_and_split_key(key)\n for key in keys:\n yield tuple(sorted(list(key)))\n else:\n yield from self._validate_and_split_key(key)", "def read_key(key, task_state, video_state):\n if key in KEYMAP['left']: # -1 frame\n video_state.image_idx -= 1\n video_state.image_idx = min(max(0, video_state.image_idx), video_state.num_frames - 1)\n\n elif key in KEYMAP['right']: # +1 frame\n video_state.image_idx += 1\n video_state.image_idx = min(max(0, video_state.image_idx), video_state.num_frames - 1)\n\n elif key in KEYMAP['up']: # -10 frames\n video_state.image_idx -= 10\n video_state.image_idx = min(max(0, video_state.image_idx), video_state.num_frames - 1)\n\n elif key in KEYMAP['down']: # +10 frame\n video_state.image_idx += 10\n video_state.image_idx = min(max(0, video_state.image_idx), video_state.num_frames - 1)\n\n elif key == ord(','): # < # -100 frame\n video_state.image_idx -= 100\n video_state.image_idx = min(max(0, video_state.image_idx), video_state.num_frames - 1)\n\n elif key == ord('.'): # > # +100 frame\n video_state.image_idx += 100\n video_state.image_idx = min(max(0, video_state.image_idx), 
video_state.num_frames - 1)\n\n elif key == ord('\\x1b'): # esc\n video_state.save()\n return -1\n\n\n elif ord('0') <= key <= ord('9'): # action idx\n tmp = key - ord('0') - 1\n if 0 <= tmp < task_state.num_actions:\n action_idx = tmp\n video_state.labels[video_state.get_image_name()] = task_state.actions[int(action_idx)]\n video_state.save()\n\n\n # elif key == ord('r'): # remove\n # tmp = get_user_input('Enter the Remove Clip ID: ') - 1\n # if 0 <= tmp < len(video_state.clips):\n # video_state.clips.pop(tmp)\n\n elif key == ord('i'): # jump to image\n tmp = get_user_input('Enter the Frame ID: ') - 1\n if 0 <= tmp < video_state.num_frames:\n video_state.image_idx = tmp\n\n elif key == ord('t'): # jump to task\n tmp = get_user_input('Enter the Task ID: ') - 1\n if 0 <= tmp < task_state.num_tasks:\n task_state.task_idx = tmp\n return 1\n\n elif key == ord(']'): # next task / video\n task_state.task_idx += 1\n task_state.task_idx = min(max(0, task_state.task_idx), task_state.num_tasks - 1)\n video_state.save()\n return 1\n\n elif key == ord('['): # previous task / video\n task_state.task_idx -= 1\n task_state.task_idx = min(max(0, task_state.task_idx), task_state.num_tasks - 1)\n video_state.save()\n return 1\n\n elif key == ord('\\t'): # TAB #lookahead toggle\n video_state.look_ahead = (video_state.look_ahead + 1) % 3\n\n elif key == ord(' '): # randomize text color\n color_key = random.choice(list(COLORS.keys()))\n video_state.color = COLORS[color_key]\n\n return 0", "def parse_key_index(self, stmt):\r\n if stmt['name'] in self._names:\r\n raise ValueError('There is already a value with name {}'.format(stmt['name']))\r\n key_index = KeyIndex(name=stmt['name'],\r\n data_type=stmt.get('data_type', 'Vertex'))\r\n return key_index", "def ahead(self, k):\n assert k == 1\n if self.pos + k < len(self.tokens):\n return self.tokens[self.pos + k]\n return None", "def parse(self, key: str):\n redis_type = self.client.type(key).decode('utf-8')\n key_type = key\n if \":\" in key:\n key_type = key.split(\":\")[1]\n if redis_type == 'hash':\n deserializer = self.STATE_DESERIALIZERS.get(key_type)\n if not deserializer:\n raise AttributeError(NO_DESERIAL_MSG.format(key_type))\n self._parse_hash_type(deserializer, key)\n elif redis_type == 'set':\n deserializer = self.STATE_DESERIALIZERS.get(key_type)\n if not deserializer:\n raise AttributeError(NO_DESERIAL_MSG.format(key_type))\n self._parse_set_type(deserializer, key)\n else:\n value = self.client.get(key)\n # Try parsing as json first, if there's decoding error, parse proto\n try:\n self._parse_state_json(value)\n except (UnicodeDecodeError, JSONDecodeError, AttributeError):\n self._parse_state_proto(key_type, value)", "def start_catching_keys(amount, callback, input_line, cur, count, buf=None):\n global catching_keys_data\n if \"new_cur\" in catching_keys_data:\n new_cur = catching_keys_data['new_cur']\n catching_keys_data = {'amount': 0}\n return new_cur, True, False\n catching_keys_data = ({'amount': amount,\n 'callback': callback,\n 'input_line': input_line,\n 'cur': cur,\n 'keys': \"\",\n 'count': count,\n 'new_cur': 0,\n 'buf': buf})\n return cur, False, True", "def _validate_and_split_key(self, key):\n if self._len_keys == 1:\n return self._validate_and_split_len_one(key)\n else:\n return self._validate_and_split_len(key)", "def _get_pos_from_key(key, char):\n return [i+1 for i, c in enumerate(key) if c == char]", "def _getNextKey(self, item):\n return (2, item)", "def handle_key(self, key):\n pass", "def parse_key(raw_key):\n raw_key_bytes = 
raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "async def read(self, key: str) -> ResponseOrKey:", "def ExtractKey(keyString):\n if ShouldCapitalizeKey(keyString) and FirstCharIsPunctuationChar(keyString):\n return keyString[3:5]\n elif ShouldCapitalizeKey(keyString):\n return keyString[2:4]\n elif FirstCharIsPunctuationChar(keyString):\n return keyString[2:4]\n else:\n return keyString[1:3]", "def readKey(self, keyPath):\n\t\ttry:", "def test_unpack_2(self):\n key = ('item name', str, r'name')\n assert lws.parse_schema_key(key) == (str, 'name', '')", "def _validate_and_split_len_one(self, key):\n if isinstance(key, str):\n return [key]\n elif _is_iterable(key):\n keys = []\n for k in key:\n keys.extend(self._validate_and_split_len_one(k))\n return keys\n else:\n raise KeyError(\"The key {} is not valid.\".format(key))", "def read(self, key):\n raise NotImplementedError", "def __next__(self) -> Tuple[keyType, valueType]:\n key = None\n value = None\n # To determine if it has encountered a situation where a key has multiple values.\n if (len(self.iter_values) != 0) and (self.iter_value_index < len(self.iter_values) - 1):\n self.iter_value_index += 1\n key = self.iter_key\n value = self.iter_values[self.iter_value_index]\n return key, value\n else:\n self.iter_value_index = -1\n self.iter_values = []\n\n def get_new_head_node_index(old_head_node_index: int) -> int:\n \"\"\"\n To find next node if the nodes in this chain are all visited.\n :param old_head_node_index: Subscript of the head node where the last accessed key-value pair is.\n :return: The subscript of the head node where the key-value pair has not been accessed; else return -1, if there's no new pair.\n \"\"\"\n # '-1' means that there is no more new node not visited.\n new_head_index = -1\n if old_head_node_index < self.length - 1:\n for index in range(old_head_node_index + 1, self.length):\n if len(self.hashTable[index].keys) > 0:\n new_head_index = index\n break\n return new_head_index\n\n head_node = self.hashTable[self.iter_head_node_index]\n # head_node.count > 0 means node existing.\n if len(head_node.keys) > 0:\n # There are nodes in the linked list is not accessed\n self.iter_chain_node_index += 1\n if len(head_node.keys) > self.iter_chain_node_index:\n keys_values_list = head_node.singlyLinkedList\n node = keys_values_list[self.iter_chain_node_index]\n key = node.key\n if len(node.values) == 1:\n value = node.values[0]\n else:\n self.iter_values = node.values\n value = node.values[0]\n self.iter_key = node.key\n self.iter_value_index += 1\n\n # All nodes in the linked list have been accessed. 
The new node should be accessed.\n else:\n # Find the hash address of the next node.\n new_hash_address = get_new_head_node_index(self.iter_head_node_index)\n # Find a new node that has not been visited.\n if new_hash_address != -1:\n # update the hash address and the node index.\n self.iter_head_node_index = new_hash_address\n self.iter_chain_node_index = 0\n head_node = self.hashTable[new_hash_address]\n\n keys_values_list = head_node.singlyLinkedList\n node = keys_values_list[self.iter_chain_node_index]\n key = node.key\n if len(node.values) == 1:\n value = node.values[0]\n else:\n self.iter_values = node.values\n value = node.values[0]\n self.iter_key = node.key\n self.iter_value_index = 0\n # There are no new and accessible nodes.\n else:\n raise StopIteration\n else:\n new_hash_address = get_new_head_node_index(self.iter_head_node_index)\n if new_hash_address != -1:\n self.iter_head_node_index = new_hash_address\n self.iter_chain_node_index = 0\n head_node = self.hashTable[new_hash_address]\n\n keys_values_list = head_node.singlyLinkedList\n node = keys_values_list[self.iter_chain_node_index]\n key = node.key\n if len(node.values) == 1:\n value = node.values[0]\n else:\n self.iter_values = node.values\n value = node.values[0]\n self.iter_key = node.key\n self.iter_value_index = 0\n # There is no new and accessible node.\n else:\n raise StopIteration\n return key, value", "def process_key(self, key):\n\t\tif(self.index/SCROLL_CONSTANT >= len(self.text)):\n\t\t\tif(key == UP):\n\t\t\t\tself.select_index = max(0, self.select_index - 1)\n\t\t\telif(key == DOWN):\n\t\t\t\tself.select_index = min(len(self.choice_data_list) - 1, self.select_index + 1)", "def __getitem__(self, key):\n self.__check_key_validity(key)\n return self.data[key[0]][key[1]]", "def __getitem__(self, key):\n\n if type(key) != self.type:\n raise TypeError\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n print(\"FIRST_CHAR\", first_char)\n print(\"self.children\", self.children)\n raise KeyError\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n raise KeyError\n\n return node.value\n else:\n return self.children[first_char][others]", "def __parse_key(self, key_str):\n key_type = key_str[0]\n full_column, value = key_str[1:].split(COL_SEPARATOR,1)\n table_name, column_name = full_column.split('.')\n if key_type == NUMBER_KEY_TYPE:\n value = int(value)\n elif key_type == DATE_KEY_TYPE:\n t_data = time.strptime(value, '%Y-%m-%d')\n value = datetime.date(t_data[0], t_data[1], t_data[2])\n\n return table_name, column_name, value", "def string_to_keypair(self, data): \n return keypair_lst", "def __getitem__(self, key ):\n return self.getSequence( key, \"+\", 0, 0, as_array = True )", "def __parse(self) -> object:\r\n char = self.data[self.idx: self.idx + 1]\r\n if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:\r\n str_len = int(self.__read_to(b':'))\r\n return self.__read(str_len)\r\n elif char == b'i':\r\n self.idx += 1\r\n return int(self.__read_to(b'e'))\r\n elif char == b'd':\r\n return self.__parse_dict()\r\n elif char == b'l':\r\n return self.__parse_list()\r\n elif char == b'':\r\n raise DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))\r\n else:\r\n raise DecodingError('Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))", "def next(self):\n if LongObjectHashMap.self.modCount != self.expectedModCount:\n raise 
ConcurrentModificationException()\n if not self.hasNext():\n raise NoSuchElementException()\n keys = LongObjectHashMap.self.keys\n self.count += 1\n if self.prevKey != self.EMPTY_KEY:\n self.innerIndex += 1\n while len(keys):\n if keys[self.index] != None:\n while len(length):\n key = keys[self.index][self.innerIndex]\n value = self.values[self.index][self.innerIndex]\n if key == self.EMPTY_KEY:\n break\n self.lastReturned = key\n self.prevKey = key\n self.prevValue = value\n return self.prevValue\n self.innerIndex += 1\n self.innerIndex = 0\n self.index += 1\n raise NoSuchElementException()", "def loader_from_key(key):\n\n if \":\" in key:\n return key.split(\":\")\n return key, None", "def parse(self, whole_api_key: str):\n\n if StringExtension.is_none_or_white_space(whole_api_key):\n return ApiKeyParseResult.InvalidEmptyOrWhitespace\n\n if len(whole_api_key) != 61:\n return ApiKeyParseResult.InvalidKeyLength\n\n if whole_api_key.index('.') == -1:\n return ApiKeyParseResult.InvalidKeyFormat\n\n public_part_end = whole_api_key[0:50].index('.')\n if public_part_end == -1:\n return ApiKeyParseResult.InvalidUnableToExtractPublicPart\n\n public_part = whole_api_key[0:public_part_end]\n if len(public_part) != 20:\n return ApiKeyParseResult.InvalidPublicPartLength\n\n if len(whole_api_key) <= public_part_end + 1:\n return ApiKeyParseResult.InvalidUnableToExtractSecretPart\n\n private_part = whole_api_key[public_part_end + 1:len(whole_api_key)]\n if len(private_part) != 40:\n return ApiKeyParseResult.InvalidSecretPartLength\n\n return ApiKeyParseResult.Success", "def next(self) -> None:\n\n self._skip_space()\n beg = self.pos\n while self.pos < len(self.input) and self.input[self.pos] not in SPACES:\n self.pos += 1\n\n if beg != self.pos:\n self.current_word = self.input[beg:self.pos]\n self.start_word = beg\n else:\n self.current_word = None\n\n self.count += 1", "def __getitem__(self, key):\n self.move_to_end(key)\n return OrderedDict.__getitem__(self, key)", "def test_unpack_3(self):\n key = ('item name', str)\n assert lws.parse_schema_key(key) == (str, '.*', '')", "def read_typed_(self, start_key=\"\", end_key=None):\n if end_key == \"\":\n return\n start_key_entry = None\n if start_key:\n if isinstance(start_key, unicode):\n try:\n start_key = str(start_key)\n except:\n pass\n if not isinstance(start_key, str):\n raise ValueError(\"start must be <type 'str'> got: %s\" % type(start_key))\n start_key_entry = (start_key, )\n end_key_entry = None\n if end_key:\n if isinstance(end_key, unicode):\n try:\n end_key = str(end_key)\n except:\n pass\n if not isinstance(end_key, str):\n raise ValueError(\"end must be <type 'str'> got: %s\" % type(end_key))\n end_key_entry = (end_key, )\n \n split_entry = []\n for entry in self.read_entries_(start_key_entry, end_key_entry):\n if len(entry) == 2:\n if split_entry:\n self.not_read.append(split_entry[0][0])\n yield entry\n elif len(entry) == 5:\n if entry[1] == 0:\n if split_entry:\n self.not_read.append(split_entry[0][0])\n split_entry = [entry]\n elif (split_entry and split_entry[0][0] == entry[0] and\n len(split_entry) == int(entry[1]) and\n split_entry[0][3] == entry[3]):\n split_entry.append(entry)\n if split_entry and len(split_entry) == int(split_entry[0][2]):\n value = \"\".join([x[4] for x in split_entry])\n yield entry[0], value\n split_entry = []", "def next(self):\n self.iterator.next()\n return self.iterator.prevKey", "def Read(key):\n aes = json.loads(key)\n hmac_val = aes['hmacKey']\n return AesKey(aes['aesKeyString'],\n 
HmacKey(hmac_val['hmacKeyString'], hmac_val['size']),\n aes['size'], keyinfo.GetMode(aes['mode']))", "def parse_pairs(self):\n pass", "def __getitem__(self, (essid, key)):\n buf = self.cli.essids.getitem(essid, key)\n if buf:\n results = PYR2_Buffer()\n results.unpack(buf.data)\n if results.essid != essid:\n raise StorageError(\"Invalid ESSID in result-collection\")\n return results\n else:\n raise KeyError", "def _normalizeKeySlice(self, key):\n if key.start is None:\n kstart = (0, 0)\n else:\n kstart = key.start\n\n if key.stop is None:\n kstop = (self.width, self.height)\n else:\n kstop = key.stop\n\n if key.step is None:\n kstep = (1, 1)\n elif isinstance(key.step, int):\n # if only one int is specified, use it for both steps\n kstep = (key.step, key.step)\n else:\n kstep = key.step\n\n # x1 & y1 should be top-left, x2 & y2 should be bottom-right\n # So swap these values if need be.\n x1, y1 = kstart\n x2, y2 = kstop\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n\n try:\n x1, y1 = self._convertNegativeTupleKeyToPositiveTupleKey((x1, y1))\n\n # Because x2 and y2 can go 1 past the end of the max index, the\n # _convertNegativeTupleKeyToPositiveTupleKey() may raise an exception.\n # So we need to pass dummy values so the exception isn't raised.\n if x2 != self.width and x2 != -(self.width - 1) and \\\n y2 != self.height and y2 != -(self.height - 1):\n x2, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((x2, y2))\n elif x2 != self.width and x2 != -(self.width - 1):\n x2, _dummy = self._convertNegativeTupleKeyToPositiveTupleKey((x2, 0))\n elif y2 != self.height and y2 != -(self.height - 1):\n _dummy, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((0, y2))\n else:\n pass # In this case, we don't need to adust x2 and y2 at all. So do nothing.\n except KeyError:\n raise PyTextCanvasException('key must be a tuple of two ints')\n\n return (x1, y1, x2, y2, kstep[0], kstep[1])", "def __getitem__(self, key):\n if not isinstance(key, str) or '.' 
not in key:\n return dict.__getitem__(self, key)\n obj, token = _descend(self, key)\n return _get(obj, token)", "def keyFileParser(keyFileName):\n\n keyFile = open(keyFileName,'rb')\n keyFileHeader = keyFileHeaderReader(keyFileName)\n if not keyFileHeader: return 0 # error\n keyFile.seek(keyFileHeader[0])\n needToSwap = keyFileHeader[1]\n\n svList = []\n while 1:\n try:\n\t record = array.array('I')\n\t record.fromfile(keyFile,6)\n\t if needToSwap: record.byteswap()\n\t syncValue = (record[0],record[1],record[2])\n\t svList.append(syncValue)\n except EOFError:\n\t break\n keyFile.close()\n svList.sort()\n return svList", "def successor(self, key):\r\n index = self.locate_successor(key)\r\n self.keys[index] if index < self.num_keys() else None", "def __getitem__(self, key):\n if len(self._data) == self.size:\n return(self._data[(key + self.index) % self.size])\n else:\n return(self._data[key])", "def next_session_key(self, session_key):\r\n\t\t## verify hashcode\r\n\t\tif self.__hash == \"\":\r\n\t\t\traise VDOM_exception_sec(\"Hash code is empty\")\r\n\r\n\t\tfor idx in xrange(len(self.__hash)):\r\n\t\t\ti = self.__hash[idx]\r\n\t\t\tif not str(i).isdigit():\r\n\t\t\t\traise VDOM_exception_sec(\"Hash code contains non-digit letter \\\"%c\\\"\" % str(i))\r\n\t\tresult = 0\r\n\t\tfor idx in xrange(len(self.__hash)):\r\n\t\t\ti = self.__hash[idx]\r\n\t\t\tresult += int(self.__calc_hash(session_key, int(i)))\r\n\t\treturn (\"0\"*10 + str(result)[0:10])[-10:]", "def IsKey(possibleKey):\n if FirstCharIsPunctuationChar(possibleKey):\n return possibleKey[1] == '|'\n else:\n return possibleKey[0] == '|'", "def _parse_table(self): # type: (Optional[str]) -> Tuple[Key, Item]\n indent = self.extract()\n self.inc() # Skip opening bracket\n\n is_aot = False\n if self._current == \"[\":\n if not self.inc():\n raise self.parse_error(UnexpectedEofError)\n\n is_aot = True\n\n # Key\n self.mark()\n while self._current != \"]\" and self.inc():\n pass\n\n name = self.extract()\n key = Key(name, sep=\"\")\n\n self.inc() # Skip closing bracket\n if is_aot:\n # TODO: Verify close bracket\n self.inc()\n\n cws, comment, trail = self._parse_comment_trail()\n\n result = Null()\n values = Container()\n\n while not self.end():\n item = self._parse_item()\n if item:\n _key, item = item\n if not self._merge_ws(item, values):\n values.append(_key, item)\n else:\n if self._current == \"[\":\n _, name_next = self._peek_table()\n\n if self._is_child(name, name_next):\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n\n # Picking up any sibling\n while not self.end():\n _, name_next = self._peek_table()\n\n if not self._is_child(name, name_next):\n break\n\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n else:\n table = Table(\n values, Trivia(indent, cws, comment, trail), is_aot\n )\n\n result = table\n if is_aot and (\n not self._aot_stack or name != self._aot_stack[-1]\n ):\n result = self._parse_aot(table, name)\n\n break\n else:\n raise self.parse_error(\n InternalParserError,\n (\"_parse_item() returned None on a non-bracket character.\"),\n )\n\n if isinstance(result, Null):\n result = Table(values, Trivia(indent, cws, comment, trail), is_aot)\n\n return key, result", "def __getitem__(self, (essid, key)):\n try:\n fname = self.essids[essid][1][key]\n except IndexError:\n raise IndexError(\"No result for ESSID:Key (%s:%s)\" % (essid, key))\n 
else:\n with open(fname, 'rb') as f:\n buf = f.read()\n if buf.startswith('PYR2'):\n results = PYR2_Buffer()\n elif buf.startswith('PYRT'):\n results = PYRT_Buffer()\n else:\n raise StorageError(\"Header for '%s' unknown.\" % (fname,))\n results.unpack(buf)\n if results.essid != essid:\n raise StorageError(\"Invalid ESSID in result-collection\")\n return results", "def Read(key):\n aes = json.loads(key)\n hmac = aes['hmacKey']\n return AesKey(aes['aesKeyString'],\n HmacKey(hmac['hmacKeyString'], hmac['size']),\n aes['size'], keyinfo.GetMode(aes['mode']))", "def next_akt_key(self):\r\n return self._tokens[4]", "def __parse_dict(self) -> OrderedDict:\r\n self.idx += 1\r\n d = OrderedDict()\r\n key_name = None\r\n while self.data[self.idx: self.idx + 1] != b'e':\r\n if key_name is None:\r\n key_name = self.__parse()\r\n else:\r\n d[key_name] = self.__parse()\r\n key_name = None\r\n self.idx += 1\r\n return d", "def _parse_next_start(self, item):\n return parse(\" \".join(item.split(\"–\")[:-1]))", "def key(self):\n return self._start_position", "def __getitem__(self, key) -> BTSNode:\n pass", "def __getitem__(self, key):\n key_split = key.split('.')\n last_index = len(key_split) - 1\n current = self\n for i, k in enumerate(key_split):\n try:\n current = getattr(current, k)\n except KeyError:\n if i == last_index:\n raise\n temp_dict = DotDictWithAcquisition()\n temp_dict.__dict__['_parent'] = weakref.proxy(current)\n current = temp_dict\n return current", "def _handle_result_by_key(self, key):\n invalid_options = ('key', 'keys', 'startkey', 'endkey')\n if any(x in invalid_options for x in self.options):\n raise ResultException(102, invalid_options, self.options)\n return self._ref(key=key, **self.options)", "def _read(self, valid):\n start = self.pos\n while valid(self.char) and self.pos < self.length:\n self._read_char()\n\n return self.data[start : self.pos]", "def handle(self, event):\n try:\n log.debug(\"handle-key\", key=event[\"Key\"])\n prefix = event[\"Key\"].split(\"/\")[0]\n if not event[\"Value\"]:\n log.debug(\"ignore-key\", key=event[\"Key\"], reason=\"empty value\")\n return\n getattr(self, prefix)(event)\n except BaseException as e: # noqa\n # This must be a bare-except as it protects threads and the main\n # loop from dying. 
It could be that I'm wrong, but I'm leaving this\n # in for good measure.\n log.exception(\n \"handle-key-failed\", key=event.get(\"Key\", None), exc_info=True\n )\n log.debug(\"finish-handle-key\", key=event.get(\"Key\", None))", "def __getitem__(self, key):\n \n if type(key) is int or type(key) is long:\n if key >= len(self):\n raise IndexError, \"Index ({:d}) is bigger than my length ({:d})\".format(key, self.total_events)\n if key < 0:\n if (-key) > len(self):\n raise IndexError, \"Index ({:d}) is too small for my length ({:d})\".format(key, self.total_events)\n key = len(self) + key\n \n f = None\n for f in self:\n key -= 1\n if key < 0: break\n return f\n\n elif type(key) is slice:\n (start, stop, stride) = key.indices(self.total_events)\n valid = range(start, stop, stride)\n retval = []\n counter = 0\n for f in self:\n if counter in valid:\n retval.append(f)\n counter += 1\n return retval", "def _parse_tokens(self, result: dict, token_ind: int, depth: int=0):\n while token_ind < len(self._tokens):\n cur_token = self._tokens[token_ind]\n if cur_token == ',': # redundant commas that we simply ignore everywhere except list \"[x, y, z...]\"\n token_ind += 1\n continue\n if cur_token == '}':\n return token_ind + 1\n next_token = self._tokens[token_ind + 1]\n if next_token == '{':\n result[cur_token] = dict()\n token_ind = self._parse_tokens(result[cur_token], token_ind + 2, depth + 1)\n elif next_token == ':':\n next_next_token = self._tokens[token_ind + 2]\n if next_next_token == '[':\n result[cur_token] = list()\n token_ind = self._parse_list(result[cur_token], token_ind + 3)\n else:\n if cur_token not in result:\n result[cur_token] = self._tokens[token_ind + 2]\n else:\n if not isinstance(result[cur_token], list):\n old_val = result[cur_token]\n result[cur_token] = [old_val]\n result[cur_token].append(self._tokens[token_ind + 2])\n token_ind += 3\n else:\n raise Error('Wrong character \"{}\" in position {}'.format(next_token, token_ind))\n if depth != 0:\n raise Error('Input/output braces mismatch.')\n return token_ind", "def __getitem__(self, key):\n #print( key)\n\n if key == '':\n return self\n\n if not isinstance(key, str):\n return self.__getattr__(key)\n elif not '.' 
in key:\n return self.__getattr__(key)\n else:\n k1, k2 = key.split('.', 1)\n return self.__getattr__(k1)[k2]", "def test__parse_key_unquoted(value, position, expected_output, expected_position):\n state = ParserState(value)\n state.position = position\n \n output = parse_key_unquoted(state)\n vampytest.assert_instance(output, tuple)\n vampytest.assert_eq(output, expected_output)\n vampytest.assert_eq(state.position, expected_position)", "def next_gte_pair(self, key):\n\n node = self.__next_gte_node(self.node, key)\n\n if node == None:\n return None, None\n return (node.key, node.vp)", "def parse_item(self, terminator):\n c = self.skip_ws\n if c == \"[\":\n result = self.parse_group()\n c = self.skip_ws\n else:\n name = self.name\n if name != \"\":\n c = self.next\n\n result = ListenerItem(name=name)\n\n if c in \"+-\":\n result.name += \"*\"\n result.metadata_defined = c == \"+\"\n cn = self.skip_ws\n result.metadata_name = metadata = self.name\n if metadata != \"\":\n cn = self.skip_ws\n\n result.is_any_trait = (\n (c == \"-\") and (name == \"\") and (metadata == \"\")\n )\n c = cn\n\n if result.is_any_trait and (\n not (\n (c == terminator)\n or ((c == \",\") and (terminator == \"]\"))\n )\n ):\n self.error(\"Expected end of name\")\n elif c == \"?\":\n if len(name) == 0:\n self.error(\"Expected non-empty name preceding '?'\")\n result.name += \"?\"\n c = self.skip_ws\n\n cycle = c == \"*\"\n if cycle:\n c = self.skip_ws\n\n if c in \".:\":\n result.notify = c == \".\"\n next = self.parse_item(terminator)\n if cycle:\n last = result\n while last.next is not None:\n last = last.next\n last.next = lg = ListenerGroup(items=[next, result])\n result = lg\n else:\n result.next = next\n\n return result\n\n if c == \"[\":\n is_closing_bracket = self.skip_ws == \"]\"\n next_char = self.skip_ws\n item_complete = next_char == terminator or next_char == \",\"\n if is_closing_bracket and item_complete:\n self.backspace\n result.is_list_handler = True\n else:\n self.error(\"Expected '[]' at the end of an item\")\n else:\n self.backspace\n\n if cycle:\n result.next = result\n\n return result", "def __getitem__(self, key):\r\n r = self._get_raw_input()[key]\r\n if isinstance(r, list):\r\n return r[0]\r\n return r", "def __getitem__(self, key):\n if not isinstance(key, int):\n raise TypeError\n if key < 0 or key >= len(self.data):\n raise IndexError\n batch = self.data[key]\n batch_size = len(batch)\n batch = list(zip(*batch))\n assert len(batch) == 6\n\n # orig_idx = lens\n token_ids = np.array(seq_padding(batch[0], self.max_len))\n s_start, s_end = np.array(batch[1]), np.array(batch[2])\n o_labels = np.array(batch[3])\n distance_to_s = np.array(seq_padding(batch[4], self.max_len))\n mask = np.array(seq_padding(batch[5], self.max_len))\n\n # print(token_ids, s_start, s_end, o_labels)\n\n return (token_ids, distance_to_s, s_start, s_end, o_labels, mask)", "def _parse_from_ref(cls, pairs=None, flat=None,\n reference=None, serialized=None, urlsafe=None,\n app=None, namespace=None, parent=None):\n if cls is not Key:\n raise TypeError('Cannot construct Key reference on non-Key class; '\n 'received %r' % cls)\n if (bool(pairs) + bool(flat) + bool(reference) + bool(serialized) +\n bool(urlsafe) + bool(parent)) != 1:\n raise TypeError('Cannot construct Key reference from incompatible '\n 'keyword arguments.')\n if urlsafe:\n serialized = _DecodeUrlSafe(urlsafe)\n if serialized:\n reference = _ReferenceFromSerialized(serialized)\n if reference:\n reference = _ReferenceFromReference(reference)\n pairs = []\n 
elem = None\n path = reference.path()\n for elem in path.element_list():\n kind = elem.type()\n if elem.has_id():\n id_or_name = elem.id()\n else:\n id_or_name = elem.name()\n if not id_or_name:\n id_or_name = None\n tup = (kind, id_or_name)\n pairs.append(tup)\n if elem is None:\n raise RuntimeError('Key reference has no path or elements (%r, %r, %r).'\n % (urlsafe, serialized, str(reference)))\n # TODO: ensure that each element has a type and either an id or a name\n # You needn't specify app= or namespace= together with reference=,\n # serialized= or urlsafe=, but if you do, their values must match\n # what is already in the reference.\n ref_app = reference.app()\n if app is not None:\n if app != ref_app:\n raise RuntimeError('Key reference constructed uses a different app %r '\n 'than the one specified %r' %\n (ref_app, app))\n ref_namespace = reference.name_space()\n if namespace is not None:\n if namespace != ref_namespace:\n raise RuntimeError('Key reference constructed uses a different '\n 'namespace %r than the one specified %r' %\n (ref_namespace, namespace))\n return (reference, tuple(pairs), ref_app, ref_namespace)", "def parse_dot_key(data, key):\n for key_part in key.split('.'):\n data = data.get(key_part)\n if data is None:\n break\n return data", "def __getitem__(self, key):\n key_split = key.split('.')\n current = self\n for k in key_split:\n current = getattr(current, k)\n return current", "def _next_char(self):\r\n\r\n if self._index >= len(self._input_string):\r\n return None\r\n\r\n ret = self._input_string[self._index]\r\n self._index += 1\r\n return ret", "def get(self, key):\n\t\treturn self.__get(key, key[1:])", "def locate_successor(self, key):\r\n index = 0\r\n while index < self.num_keys() and self.keys[index] <= key:\r\n index += 1\r\n return index", "def __next__(self):\n self._k += 1\n if self._k < len(self._seq):\n return(self._seq[self._k])\n else:\n # print('*** End of iteration. 
***')\n raise StopIteration()", "def handle_key(self, k):\n\t\treturn False", "def read(self, start_key=\"\", end_key=None, limit=None):\n count = 0\n for key, value in self.read_typed_(start_key, end_key):\n if value[0] == recordio_entry_types.STRING:\n yield key, value[1:]\n elif value[0] == recordio_entry_types.MARSHAL:\n yield key, marshal.loads(value[1:])\n elif value[0] == recordio_entry_types.CPICKLE:\n yield key, cPickle.loads(value[1:])\n else:\n raise ValueError()\n count += 1\n if limit != None and count >= limit:\n break", "def kpnext(self, k: int) -> int:\n result = self._read_inline(f\"kpnext({k})\")\n return int(result)", "def __getitem__(self, key):\n while self is not None:\n try:\n return self._dict[key]\n except KeyError:\n self = self.parent\n raise KeyError(\"%s was not declared\" % key)", "def __getitem__(self, key):\n for entry_key, value in self.read(key):\n if entry_key != key:\n raise KeyError(key)\n return value\n raise KeyError(key)", "def _search_prefix(self, key, node):\n result = None\n\n number_of_matching_chars = node.get_number_of_matching_characters(key)\n\n if number_of_matching_chars == len(key) and number_of_matching_chars <= len(node.key):\n result = node\n\n elif node.key == \"\" or (number_of_matching_chars < len(key) and number_of_matching_chars >= len(node.key)):\n new_text = key[number_of_matching_chars:]\n for child in node.children:\n if child.key.startswith(new_text[0]):\n result = self._search_prefix(new_text, child)\n break\n\n return result", "def next(self):\n self.pos += 1\n self.current_char = None if self.pos >= len(self.input) else self.input[self.pos]", "def _checkKey(self, key):\n x, y = self._convertNegativeTupleKeyToPositiveTupleKey(key)\n return x, y", "def from_knx(self, data):\n pos = self.header.from_knx(data)\n\n self.init(self.header.service_type_ident)\n pos += self.body.from_knx(data[pos:])\n\n if pos != len(data):\n raise CouldNotParseKNXIP(\"KNXIP data has wrong length\")\n\n return pos", "def getKeyFromString(self, key_str, errors=None, add_default=True):\n if key_str:\n comps = key_str.split('.')\n new_key = self\n for key in comps:\n if hasattr(new_key, key):\n new_key=getattr(new_key, key)\n elif isinstance(errors, Errors):\n errors.Add(ErrMsg.Error.Validation.Invalidkey, key)\n return None\n else:\n return None\n\n # if key_str is only a partial keychain, add default keys to complete it.\n if len(comps) < 3 and add_default:\n return self._defaultKeyChain(new_key, errors)\n else:\n return new_key\n else:\n return self", "def parse_data_value(self, value):\n #print('parsing: {}'.format(value))\n if len(value) == 0:\n return value\n elif value[0] == '(' and value[-1] == ')':\n newdict = {}\n cur_level = 0\n cur_key = []\n cur_value = []\n cur_inner = []\n state = 0\n first_key_pass = False\n for char in value[1:-1]:\n\n # State 0 - reading key\n if state == 0:\n if char == '=':\n state = 1\n elif first_key_pass and char == ',':\n pass\n else:\n cur_key.append(char)\n first_key_pass = False\n\n # State 1 - reading value\n elif state == 1:\n if char == ',':\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_value))\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n elif char == '(':\n cur_level += 1\n cur_inner.append(char)\n state = 2\n else:\n cur_value.append(char)\n\n # State 2 - Reading first char of an inner paren stanza\n elif state == 2:\n if char == '(':\n newdict[''.join(cur_key)] = []\n state = 4\n at_first = True\n else:\n state = 3\n\n # State 3 - reading a 
regular inner dict\n if state == 3:\n if char == '(':\n cur_level += 1\n elif char == ')':\n cur_level -= 1\n cur_inner.append(char)\n if cur_level == 0:\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_inner[1:-1]))\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n\n # State 4 - Reading a list\n elif state == 4:\n if char == '(':\n cur_level += 1\n if not at_first:\n cur_inner.append(char)\n elif char == ')':\n cur_level -= 1\n cur_inner.append(char)\n\n if cur_level == 1:\n newdict[''.join(cur_key)].append(self.parse_data_value(''.join(cur_inner)))\n cur_inner = []\n\n elif cur_level == 0:\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n\n elif cur_level == 1 and char == ',':\n pass\n\n else:\n cur_inner.append(char)\n\n at_first = False\n\n # Clean up, depending on our state\n if state == 0:\n pass\n elif state == 1:\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_value))\n else:\n raise Exception(\"shouldn't be able to get here\")\n\n return newdict\n else:\n\n # Check for quoted values, and don't split commas inside them.\n # Also don't try to parse mismatched quotes. We're just being\n # even more stupid about it and converting commas in quotes to\n # unicode snowmen, temporarily\n new_value = value\n replace_comma = u\"\\u2603\"\n quote_parts = value.split('\"')\n if len(quote_parts) > 1 and len(quote_parts) % 2 == 1:\n new_val_list = []\n for (idx, part) in enumerate(quote_parts):\n if idx % 2 == 1:\n new_val_list.append(part.replace(',', replace_comma))\n else:\n new_val_list.append(part)\n new_value = '\"'.join(new_val_list)\n\n parts = [p.replace(replace_comma, ',') for p in new_value.split(',')]\n if len(parts) == 1:\n # See the comment on the other side of the `if` here. We may have\n # a single-element dict.\n if '=' in value:\n newdict = {}\n (key, val) = value.split('=', 1)\n newdict[key] = val\n return newdict\n else:\n return value\n else:\n # This is hokey, and a byproduct of the stupid way we're parsing\n # this stuff (and is susceptible to corner cases) - anyway, at\n # this point we MAY have a dict, or we may just have a string\n # which happens to have a comma in it. We'll just test the first\n # element and see if there's an equals sign in it. If it does,\n # then we'll parse it as a dict. If not, just return as a string.\n if '=' in parts[0]:\n newdict = {}\n for part in parts:\n (key, val) = part.split('=', 1)\n newdict[key] = val\n return newdict\n else:\n return value", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. 
See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def __getitem__(self, key):\n for section in self.header.values():\n for ckey, val in section.items():\n if ckey == key:\n return val\n else:\n raise ValueError", "def __getitem__(self, key):\n hash_ = self._hash(key)\n start = bisect.bisect(self._keys, hash_)\n if start == len(self._keys):\n start = 0\n return self._nodes[self._keys[start]]", "def __getitem__(self, key):\n hash_ = self._hash(key)\n start = bisect.bisect(self._keys, hash_)\n if start == len(self._keys):\n start = 0\n return self._nodes[self._keys[start]]", "def __getitem__(self, key):", "def iteritems(self):\n\t\tself.filep.seek(self.start + 2048)\n\n\t\t# iterate until we hit the enddata marker\n\t\twhile self.filep.tell() < self.enddata - 1:\n\t\t\t# fetch the lengths of the key and value\n\t\t\t(klen, vlen) = unpack('<LL', self.filep.read(8))\n\n\t\t\t# yield the key and value as a tuple\n\t\t\tyield (self.filep.read(klen), self.filep.read(vlen))", "def __getitem__(self, key):\n return self.__buffer[key]", "def __getitem__(self, key) -> Union[int, Tuple[int]]:\n\n if type(key) is slice:\n start = key.start\n stop = key.stop\n step = key.step\n\n if start is None:\n start = min(self._memory.keys())\n if stop is None:\n stop = max(self._memory.keys()) + 1\n if step is None:\n step = 1\n\n return tuple(self._memory[i] for i in range(start, stop, step))\n\n return self._memory[key]" ]
[ "0.63221157", "0.6206109", "0.6151237", "0.5712115", "0.5649564", "0.53943276", "0.53714085", "0.5363242", "0.5210409", "0.51909065", "0.5114601", "0.50023776", "0.4978723", "0.49580935", "0.49418062", "0.4941112", "0.49383867", "0.49335584", "0.4920512", "0.49124452", "0.49099967", "0.4908367", "0.49073866", "0.49008515", "0.48807466", "0.48693874", "0.4862788", "0.4851125", "0.48306853", "0.48302117", "0.4828318", "0.4824507", "0.48196688", "0.4799949", "0.47984973", "0.4796842", "0.4794253", "0.4787337", "0.478574", "0.4773369", "0.47612116", "0.47598207", "0.47590265", "0.4751279", "0.47493267", "0.4746046", "0.4741237", "0.47364363", "0.47304523", "0.4708657", "0.4705772", "0.46886283", "0.46855572", "0.46824524", "0.46652043", "0.46649268", "0.4657672", "0.4653014", "0.46462157", "0.46351454", "0.46348593", "0.46341288", "0.46268964", "0.46102092", "0.46051776", "0.4590995", "0.45808604", "0.45803598", "0.45710388", "0.45645747", "0.45636532", "0.45570776", "0.45566052", "0.4523392", "0.4519109", "0.45187125", "0.45115858", "0.45087785", "0.45078415", "0.45033804", "0.44829533", "0.4475105", "0.4472554", "0.44693056", "0.44632012", "0.44631365", "0.44629592", "0.44616258", "0.44600153", "0.4457165", "0.4446649", "0.44434363", "0.44423842", "0.44404647", "0.44384646", "0.44384646", "0.44376463", "0.4436803", "0.44312096", "0.44262612" ]
0.68402857
0
Parses a key enclosed in either single or double quotes.
def _parse_quoted_key(self): # type: () -> Key quote_style = self._current key_type = None for t in KeyType: if t.value == quote_style: key_type = t break if key_type is None: raise RuntimeError("Should not have entered _parse_quoted_key()") self.inc() self.mark() while self._current != quote_style and self.inc(): pass key = self.extract() self.inc() return Key(key, key_type, "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_key(self): # type: () -> Key\n if self._current in \"\\\"'\":\n return self._parse_quoted_key()\n else:\n return self._parse_bare_key()", "def test__parse_key_unquoted(value, position, expected_output, expected_position):\n state = ParserState(value)\n state.position = position\n \n output = parse_key_unquoted(state)\n vampytest.assert_instance(output, tuple)\n vampytest.assert_eq(output, expected_output)\n vampytest.assert_eq(state.position, expected_position)", "def _sloppy_parse_user_and_api_data (self, key, contents):\n key_start = contents.find(key + '\"')\n if int(key_start) == -1:\n return None\n sub_contents = contents[int(key_start):]\n l = sub_contents.find('\",')\n return contents[(int(key_start)+len(key)+3):int(key_start)+l].decode('string_escape')", "def format_key(self, key):\n if self.in_quotes(key):\n key = self.format_value(key) \n else:\n if key.lower() in ALL_KEYWORDS:\n key = self.format_value(key.upper())\n else:\n key = self.format_value(self.add_quotes(key.lower()))\n\n return key", "def parse_key_string(give_dict):\n key_string = ''\n for name in give_dict:\n if name == 'announced-date' or name == 'currency' or name == 'price':\n continue\n name_value = give_dict[name].lower()\n name_value = ' '.join(name_value.split())\n key_string = \"%s %s\" % (key_string, name_value)\n # To simplify the matching script:\n # the first and last character of the key has to be a whitespace\n return \"%s \" % key_string", "def _parse_bare_key(self): # type: () -> Key\n self.mark()\n while self._current.is_bare_key_char() and self.inc():\n pass\n\n key = self.extract()\n\n return Key(key, sep=\"\")", "def key(name):\n return (\n Literal(name) ^\n (sep('\\'') + Literal(name) + sep('\\'')) ^\n (sep('\"') + Literal(name) + sep('\"')))", "def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]", "def _parse_line(line):\n\n line = line.strip( )\n\n keyword = None\n\n spos = line.find('\\\"')\n if spos > 0:\n # the quote character is preceded by a keyword here\n pos = line.find(' ')\n keyword = line[0:pos]\n\n message = line[spos+1:-1]\n\n return keyword, message", "def test_parse_simple_quote_with_double_quote(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a (\\\") character\")):\n api.parse_quote(\" We accept the love we think we \\\" deserve. 
- Stephen Chbosky\",\n simple_format=True)", "def _get_key_from_file(file_contents, key):\n r = re.compile('^{}\\=[\\'\\\"]*([^\\'\\\"\\n]*)'.format(key), re.MULTILINE)\n match = r.search(file_contents)\n if match:\n return match.group(1)\n else:\n return ''", "def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]", "def _parse_item(item: str) -> dict:\n delimiter = _get_delimiter(item)\n key, value = item.split(delimiter)\n if delimiter == '=':\n return {key: value}\n else:\n try:\n return {key: json.loads(value)}\n except json.JSONDecodeError:\n raise click.UsageError(JSON_ERROR_MESSAGE.format(item))", "def decode_key(key):\n if '-tags=' in key:\n key_name, tags_json = key.split('-tags=')\n return key_name, json.loads(tags_json)\n return key, None", "def dequote(s):\n if len(s) < 2:\n return s\n elif (s[0] == s[-1]) and s.startswith(('\"', \"'\")):\n return s[1: -1]\n else:\n return s", "def get_key_value(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[line.find('=') + 1:]", "def test_parse_quotes_doublequote(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a (\\\") character\")):\n api.parse_quote(\" This is a quote\\\". | Author | Publication | tag1, tag2 , tag3 \",\n simple_format=False)", "def deserialize_key(key: str):\n try:\n lit = ast.literal_eval(key)\n if isinstance(lit, Hashable):\n key = lit\n except ValueError:\n pass\n return key", "def test_double_quoted(self):\n e = yaenv.core.EnvVar('key = \"value\"\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate", "def _value_needs_quotes(val):\n if not val:\n return None\n val = \"\".join(str(node) for node in val.filter_text(recursive=False))\n if not any(char.isspace() for char in val):\n return None\n if \"'\" in val and '\"' not in val:\n return '\"'\n if '\"' in val and \"'\" not in val:\n return \"'\"\n return \"\\\"'\" # Either acceptable, \" preferred over '", "def test_unquote_correctness(\n unquoter_key: str,\n quoted: str,\n unquoted: str,\n) -> None:\n unquoter = unquoters[unquoter_key]\n assert unquoted == unquoter(quoted)", "def _parse_parameters(parameters):\n\n if not re.match(r'^(\\w+)=\"([^=]+)\"(\\s{1}(\\w+)=\"([^=]+)\")*$', parameters):\n raise ValueError\n\n # first we add tokens that separate key/value pairs.\n # in case of key='ss sss ss', we skip tokenizing when we se the first single quote\n # and resume when we see the second\n replace_space = True\n tokenized = \"\"\n for c in parameters:\n if c == '\\\"':\n replace_space = not replace_space\n elif c == ' ' and replace_space:\n tokenized += \"$$\"\n else:\n tokenized += c\n\n # now get the tokens\n tokens = tokenized.split('$$')\n result = {}\n for token in tokens:\n # separate key/values\n key_value = token.split(\"=\")\n result[key_value[0]] = key_value[1]\n return result", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def splitkv(s):\n 
a=re.split('(\\w*)\\s*=\\s*\"([^=\"]*)\"\\s*', s)\n a=[ t for t in a if t!='']\n return a", "def parse_var(s):\n items = s.split('=')\n key = items[0].strip() # we remove blanks around keys, as is logical\n if len(items) > 1:\n # rejoin the rest:\n value = '='.join(items[1:])\n return (key, value)", "def __kv_pair(line):\n\n splitline = line.split(\"=\")\n\n if len(splitline) <= 1:\n return None, None\n\n key = splitline[0].strip()\n\n val = \"=\".join(splitline[1:]).strip()\n\n return key, val", "def parse_string_3(string):\n string = re.sub(r\"\\\"\", \"\", string)\n return string.strip().lower()", "def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")", "def test_single_quoted(self):\n e = yaenv.core.EnvVar(\"key = 'value'\\n\")\n assert e.key == 'key'\n assert e.value == 'value'\n assert not e._interpolate", "def parse_var(s):\n items = s.split(\"=\")\n key = items[0].strip() # we remove blanks around keys, as is logical\n value = \"\"\n if len(items) > 1:\n # rejoin the rest:\n value = \"=\".join(items[1:])\n return key, value", "def parse_tag_key_value(key_value: str, value_required=True) -> Tuple[str, Any]:\n if not key_value:\n raise ValueError(\"key must be specified.\")\n\n if \"=\" not in key_value:\n if value_required:\n raise ValueError(f\"key=value pair expected: '{key_value}'\")\n return (key_value, ANY_VALUE)\n\n key, value = key_value.split(\"=\", 1)\n if not key:\n raise ValueError(f\"key must be specified: '{key_value}'\")\n return (key, parse_tag_value(value))", "def _dequote(value: str) -> str:\n if value[0] == '\"' and value[-1] == '\"':\n return value[1:-1]\n return value", "def is_key_string(string):\r\n return len(string) > 1 and string[0] == '_'", "def test_key_str(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n string = str(key)\n assert isinstance(string, str)\n assert string == \"{'warning': False, 'in_car': True}\"", "def _clean_key_type(key_name, escape_char=ESCAPE_SEQ):\n\n for i in (2, 1):\n\n if len(key_name) < i:\n return None, key_name\n\n type_v = key_name[-i:]\n\n if type_v in _KEY_SPLIT:\n if len(key_name) <= i:\n return _KEY_SPLIT[type_v], ''\n\n esc_cnt = 0\n for pos in range(-i - 1, -len(key_name) - 1, -1):\n if key_name[pos] == escape_char:\n esc_cnt += 1\n else:\n break\n\n if esc_cnt % 2 == 0:\n return _KEY_SPLIT[type_v], key_name[:-i]\n else:\n return None, key_name\n\n return None, key_name", "def tokenize_key_value_pair(kv_pair):\n key, value = kv_pair.strip().split('\\t')\n key = tuple(key.strip().split())\n value = tuple(value.strip().split())\n return (key, value)", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def keyValueFromTxt(fname):\n keyValue = dict()\n fname = os.path.abspath(fname)\n print(\"Reading input from {:s}\\n\".format(fname))\n with open(fname, \"r\") as f:\n for line in f:\n # remove white spaces\n if line.startswith(\"#\") or line.startswith(\"%\"):\n continue\n if line in [\"\\n\", \"\\r\", \"\\rn\"]:\n continue\n for delim in [\"#\", \"%\"]:\n line = line.partition(delim)[0]\n line = line.strip()\n line = line.replace(\" \", \"\")\n line = line.replace('\"', \"\")\n key, value = line.split(\"=\")\n keyValue[key] = value\n\n return keyValue", "def parse_string_2(string):\n string = re.sub(r\"\\'\", \"\", string)\n return string.strip().lower()", "def dequote(x): \n if x[0] 
== '\"' and x[len(x)-1] == '\"':\n return x[1:len(x)-1]\n return x", "def key_value_string_value(key_value_string, key):\n if key_value_string is None or key is None:\n return None\n words = key_value_string.split(' ')\n for i in range(0, len(words)-1):\n if words[i] == key + ':':\n return words[i+1]\n return None", "def quote(s):\n return unescape(quoteattr(s))", "def test_parse_simple_quote_with_double_quote_in_author(self):\n quote = api.parse_quote(\" Hey, grades are not cool, learning is cool. - Arthur \\\"Fonzie\\\" Fonzarelli\",\n simple_format=True)\n\n # Check that it parsed correctly\n self.assertEquals(\"Arthur \\\"Fonzie\\\" Fonzarelli\", quote.author)", "def test_unquoted(self):\n e = yaenv.core.EnvVar('key = value\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate", "def prepare_key(self, key):\n return smart_str(key)", "def __parse_key(self, key_str):\n key_type = key_str[0]\n full_column, value = key_str[1:].split(COL_SEPARATOR,1)\n table_name, column_name = full_column.split('.')\n if key_type == NUMBER_KEY_TYPE:\n value = int(value)\n elif key_type == DATE_KEY_TYPE:\n t_data = time.strptime(value, '%Y-%m-%d')\n value = datetime.date(t_data[0], t_data[1], t_data[2])\n\n return table_name, column_name, value", "def as_key(key):\n return key.lstrip('/').rstrip('/')", "def read_key_str(op, key, maxlen=None, fmt=None, allow_blank=False):\n if key not in op:\n return None\n assert isinstance(op[key], str), 'key `%s` was not str' % key\n assert allow_blank or op[key], 'key `%s` was blank' % key\n assert op[key] == op[key].strip(), 'invalid padding: %s' % key\n assert not maxlen or len(op[key]) <= maxlen, 'exceeds max len: %s' % key\n\n if fmt == 'hex':\n assert re.match(r'^#[0-9a-f]{6}$', op[key]), 'invalid HEX: %s' % key\n elif fmt == 'lang':\n assert op[key] in LANGS, 'invalid lang: %s' % key\n else:\n assert fmt is None, 'invalid fmt: %s' % fmt\n\n return op[key]", "def _validate_string(self, path, value, value_is_key=False):\r\n value = re.sub('[/$#{}._|*=\\-]', ' ', value)\r\n\r\n tokens = nltk.tokenize.word_tokenize(value)\r\n for raw_token in tokens:\r\n if raw_token.startswith(\"'\"):\r\n raw_token = raw_token[1:]\r\n if self.corpus.validate_token(raw_token):\r\n continue\r\n sub_tokens = Validator.camel_case_split(raw_token)\r\n ret = True\r\n for sub_token in sub_tokens:\r\n ret = ret and self.corpus.validate_token(sub_token)\r\n\r\n if not ret:\r\n self.errors.append({\r\n \"isKey\": value_is_key,\r\n \"path\": path,\r\n \"typo\": raw_token,\r\n })", "def parseLine(line):\n\n\teq = line.find('=')\n\tif eq == -1: raise Exception()\n\tkey = line[:eq].strip()\n\tvalue = line[eq+1:-1].strip()\n\treturn key, parseValue(value)", "def get_key_and_value_from_line(line):\n if line.find(\"#\") != 0 or line.find(\"!\") != 0:\n index_key_end = line.find(\"=\")\n while (index_key_end > 0) and (line[index_key_end - 1] == \"\\\\\"):\n index_key_end = line.find(\"=\", index_key_end + 1)\n if index_key_end > 0:\n return line[0:index_key_end].strip(), line[index_key_end + 1:].strip()\n return None, None", "def quoting_sane(i):\n if '\"\"' in i:\n debug('found \"\"')\n pass\n if '\"' in i:\n debug('found \"')\n\treturn i.replace('\"','')\n debug('found \"\"')\n if \"''\" in i:\n debug(\"found ''\")\n pass\n if \"'\" in i:\n debug(\"found '\")\n pass\n return i", "def get_key_name(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[:line.find('=')]", "def 
parsekv(inputString):\n mDict = dict()\n parts = inputString.split('&')\n for item in parts:\n if (item.count('=') != 1):\n raise ValueError(\"Need a singular = sign in str. %s\" % (item, ))\n key, value = item.split('=')\n # If we can convert the string value to an int, great, otherwise\n # leave it as a string.\n try:\n mDict[key] = int(value)\n except ValueError:\n mDict[key] = value\n return mDict", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def decode_key_from_mongo(fieldname):\r\n return urllib.unquote(fieldname)", "def quote(value):\n single = value.find(\"'\")\n double = value.find('\"')\n multiline = value.find('\\n') != -1\n if multiline or ((single != -1) and (double != -1)):\n if value.find('\"\"\"') == -1 and value[0] != '\"' and value[-1] != '\"':\n s = '\"\"\"%s\"\"\"' % value\n else:\n s = \"'''%s'''\" % value\n elif (single != -1) and (double == -1):\n s = '\"%s\"' % value\n else:\n s = \"'%s'\" % value\n return s", "def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"", "def get_key_from_line(line):\n if line.find(\"#\") != 0 or line.find(\"!\") != 0:\n index_key_end = line.find(\"=\")\n while (index_key_end > 0) and (line[index_key_end - 1] == \"\\\\\"):\n index_key_end = line.find(\"=\", index_key_end + 1)\n if index_key_end > 0:\n return line[0:index_key_end].strip()\n return None", "def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v", "def _key_in_string(string, string_formatting_dict):\n key_in_string = False\n if isinstance(string, str):\n for key, value in string_formatting_dict.items():\n if \"{\" + key + \"}\" in string:\n key_in_string = True\n return key_in_string", "def embeded_triple_quotes():\n pass", "def deserialize_key(key: bytes) -> str:\n return key.decode()", "def test_regex_doublequotehandling(self):\n with pytest.raises(yaml.scanner.ScannerError) as excinfo:\n DwcaValidator(yaml.load(self.yaml_regexitdouble, Loader=yaml.FullLoader), error_handler=WhipErrorHandler)\n assert \"found unknown escape character 'd'\" in str(excinfo.value)", "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def test_sqpp_single_quotes(self):\n self.assertEqual(self.parser.parse_query(\"(expr1) - expr2 | 'expressions - in + quotes | (are) not - parsed ' - (expr3) | expr4\"),\n ['+', 'expr1', '-', 'expr2', '|', \"'expressions - in + quotes | (are) not - parsed '\", '-', 'expr3', '|', 'expr4'])\n #['+', '+ \\'expressions - in + quotes | (are) not - parsed \\' | expr1 | expr4',\n # '+', '- expr3 | expr1 | expr4',\n # '+', '+ \\'expressions - in + quotes | (are) not - parsed \\' - expr2 | expr4',\n # '+', '- expr3 - expr2 | expr4'])", "def test_parse_str(parser):\n doc = parser.parse('{\"hello\": \"world\"}')\n assert doc.as_dict() == {'hello': 'world'}", "def test_unpack_2(self):\n key = ('item name', str, r'name')\n assert lws.parse_schema_key(key) == (str, 'name', '')", "def keyValuePair(line, key, delim=\":\", dtype=None, linelen=None, pos=1):\n # Search for the keyword in the line\n loc = 
line.strip().lower().find(key.lower())\n if loc == -1:\n val = None\n else:\n # Split on the ':' following the keyword\n try:\n val = line[loc:].strip().split(delim)[pos].split()[0].strip()\n except:\n val = None\n if dtype is int:\n try:\n val = np.int(val)\n except:\n val = val\n elif dtype is float:\n try:\n val = np.float(val)\n except:\n val = val\n elif dtype is 'bracketed':\n pass\n\n return val", "def test_quoted(self):\n exp = ['(', '(', 'h ', ',', 'p', ')', 'h p', ',', \"g()[],':_\", ')', 'hpg', ';']\n content = \"((h_ ,'p')h p,'g()[],'':_')hpg;\"\n self._do_test(content, exp)\n content = \"(('h ',p)h p,'g()[],'':_')hpg;\"\n self._do_test(content, exp)", "def test_sqpp_escape_double_quotes(self):\n self.assertEqual(self.parser.parse_query('expr1 \\\\\" expr2 +(expr3) -expr4 \\\\\" + (expr5)'),\n ['+', 'expr1', '+', '\\\\\"', '+', 'expr2', '+', 'expr3', '-', 'expr4', '+', '\\\\\"', '+', 'expr5'])", "def get(self,section,key):\n value = ConfigParser.get(self,section,key)\n if value.startswith('\"') or value.startswith(\"'\"):\n return value\n if re.search(r\":\",value):\n out_dict = {}\n pieces = valuesplit(\",\")\n for piece in pieces:\n key,v = piece.split(\":\")\n out_dict[key] = translate(v)\n return out_dict\n elif re.search(\",\",value):\n values = value.split(\",\")\n return [translate(v) for v in values]\n return translate(value)", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def isquoted(token):\n\n # Token is quoted\n return token.startswith((\"'\", '\"')) and token.endswith((\"'\", '\"'))", "def parse_dot_key(data, key):\n for key_part in key.split('.'):\n data = data.get(key_part)\n if data is None:\n break\n return data", "def is_quoted(str):\n return ((len(str) > 2)\n and ((str[0] == \"'\" and str[-1] == \"'\")\n or (str[0] == '\"' and str[-1] == '\"')))", "def _requires_quotes(self, value):\n lc_value = value.lower()\n return (lc_value in self.reserved_words\n or self.illegal_initial_characters.match(value[0])\n or not self.legal_characters.match(unicode(value))\n or (lc_value != value))", "def _parse_text(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n text = element.text\r\n if text is not None:\r\n return text.strip()\r\n else:\r\n return ''\r\n else:\r\n return ''", "def _decode_key(self, key):\n if hasattr(key, \"char\"):\n return str(key.char).lower()\n elif hasattr(key, \"name\"):\n return str(key.name).lower()", "def _kv_to_dict(meta):\n try:\n return dict(m.split(\"=\", 1) for m in meta)\n except ValueError:\n raise _errors.MachineError(\"Invalid parameter (%s).\" % (meta, ))", "def _getAttributeKeyFromDBKey(dbKey):\n return dbKey[dbKey.find(\"\\x1D\")+1:]", "def test_sqpp_double_quotes(self):\n self.assertEqual(self.parser.parse_query(\n '(expr1) - expr2 | \"expressions - in + quotes | (are) not - parsed \" - (expr3) | expr4'),\n ['+', 'expr1', '-', 'expr2', '|', '\"expressions - in + quotes | (are) not - parsed \"', '-', 'expr3', '|', 'expr4'])\n #['+', '+ \"expressions - in + quotes | (are) not - parsed \" | expr1 | expr4',\n # '+', '- expr3 | expr1 | expr4',\n # '+', '+ \"expressions - in + quotes | (are) not - parsed \" 
- expr2 | expr4',\n # '+', '- expr3 - expr2 | expr4'])", "def test_startswith_special_character(self):\n for c in b\"\\0\", b\"\\n\", b\"\\r\", b\" \", b\":\", b\"<\":\n\n value = c + b\"value\"\n result = attributeAsLDIF(b\"key\", value)\n self.assertEqual(result, b\"key:: %s\\n\" % encode(value))", "def catch_unquoted_attrs(self, text, attrlist):\n for tup in attrlist:\n (an, av) = tup\n rgx = \"%s\\s*=\\s*\" % (an) \\\n + \"['\" \\\n + '\"]%s[\"' % (re.escape(av)) \\\n + \"']\"\n q = re.search(rgx, self.unescape(text))\n if q == None:\n self.errmsg(\"unquoted attribute in '%s'\" % (text))", "def _unescape_identifier(self, value):\n\n return value.replace('\"\"', '\"')", "def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"\\\"'\")", "def test_allow_singlequote(self):\n val = DwcaValidator(yaml.load(self.yaml_allow3, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'sex': 'male'}\n self.assertTrue(val.validate(document))\n\n document = {'sex': 'female'}\n self.assertFalse(val.validate(document))", "def test_unpack_3(self):\n key = ('item name', str)\n assert lws.parse_schema_key(key) == (str, '.*', '')", "def get_key(command):\n return command.split(\" \")[1]", "def test_quoted_fields(parallel, read_basic):\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = dedent(\n \"\"\"\n \"A B\" C D\n 1.5 2.1 -37.1\n a b \" c\n d\"\n \"\"\"\n )\n table = read_basic(text, parallel=parallel)\n expected = Table(\n [[\"1.5\", \"a\"], [\"2.1\", \"b\"], [\"-37.1\", \"c\\nd\"]], names=(\"A B\", \"C\", \"D\")\n )\n assert_table_equal(table, expected)\n table = read_basic(text.replace('\"', \"'\"), quotechar=\"'\", parallel=parallel)\n assert_table_equal(table, expected)", "def loader_from_key(key):\n\n if \":\" in key:\n return key.split(\":\")\n return key, None", "def parseQuotedString(self):\n ret = libxml2mod.xmlParseQuotedString(self._o)\n return ret", "def test_sqpp_beginning_double_quotes(self):\n self.assertEqual(self.parser.parse_query('\"expr1\" - (expr2)'),\n ['+', '\"expr1\"', '-', 'expr2'])", "def test_sqpp_escape_single_quotes(self):\n self.assertEqual(self.parser.parse_query(\"expr1 \\\\' expr2 +(expr3) -expr4 \\\\' + (expr5)\"),\n ['+', 'expr1', '+', \"\\\\'\", '+', 'expr2', '+', 'expr3', '-', 'expr4', '+', \"\\\\'\", '+', 'expr5'])", "def parse_key_value_arg(self, arg_value, argname):\n result = {}\n for data in arg_value:\n\n # Split at first '=' from left\n key_value_pair = data.split(\"=\", 1)\n\n if len(key_value_pair) != 2:\n raise exceptions.InvalidKeyValuePairArgumentError(\n argname=argname,\n value=key_value_pair)\n\n result[key_value_pair[0]] = key_value_pair[1]\n\n return result", "def _parse_text(node, key):\n element = node.get(key)\n if element is not None:\n if element is not None:\n return element.strip()\n else:\n return ''\n else:\n return ''", "def test_oneWord(self):\n s = 'This code \"works.\"'\n r = text.splitQuoted(s)\n self.failUnlessEqual(['This', 'code', 'works.'], r)", "def test_odd_quotes(self):\n content = \"((h_ ,'p)h p,g()[],:_)hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)\n content = \"((h_ ,'p')h p,'g()[]',:_')hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)", "def test_quote_str():\n assert pmisc.quote_str(5) == 5\n assert pmisc.quote_str(\"Hello!\") == '\"Hello!\"'\n assert pmisc.quote_str('He said \"hello!\"') == \"'He said 
\\\"hello!\\\"'\"" ]
[ "0.7441108", "0.6559555", "0.64326805", "0.6145812", "0.6115858", "0.60998946", "0.6094155", "0.5979537", "0.5926579", "0.5861342", "0.58433485", "0.58369255", "0.5756874", "0.5747807", "0.57185864", "0.57078075", "0.5646424", "0.56063914", "0.560016", "0.55992305", "0.5583899", "0.5581713", "0.55765414", "0.55735576", "0.5530113", "0.5527467", "0.55140436", "0.5513526", "0.5511248", "0.54973674", "0.54805565", "0.5455087", "0.54401326", "0.543239", "0.54046375", "0.53905594", "0.5388771", "0.5387796", "0.5381355", "0.5354894", "0.53441954", "0.5326551", "0.5314045", "0.5308302", "0.52857524", "0.5278588", "0.5270749", "0.5260307", "0.52569646", "0.52172047", "0.52048993", "0.51921093", "0.517936", "0.5176719", "0.5155818", "0.51526046", "0.5146495", "0.51409334", "0.5140453", "0.5134007", "0.5117527", "0.5115223", "0.50990003", "0.5096941", "0.50911653", "0.50911653", "0.50815177", "0.5075784", "0.5073664", "0.50728625", "0.506446", "0.5058515", "0.5057461", "0.5056891", "0.5056", "0.50532776", "0.5042785", "0.5032825", "0.50268674", "0.5025096", "0.50100654", "0.5009813", "0.50001264", "0.4999701", "0.49990034", "0.49981", "0.49942556", "0.49868008", "0.4968453", "0.4968039", "0.49664518", "0.49647465", "0.49631748", "0.4949837", "0.49496937", "0.49491337", "0.49486727", "0.49428403", "0.4929809", "0.49263725" ]
0.6559366
2
Parses a bare key.
def _parse_bare_key(self): # type: () -> Key self.mark() while self._current.is_bare_key_char() and self.inc(): pass key = self.extract() return Key(key, sep="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_key(self): # type: () -> Key\n if self._current in \"\\\"'\":\n return self._parse_quoted_key()\n else:\n return self._parse_bare_key()", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]", "def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]", "def _parse_quoted_key(self): # type: () -> Key\n quote_style = self._current\n key_type = None\n for t in KeyType:\n if t.value == quote_style:\n key_type = t\n break\n\n if key_type is None:\n raise RuntimeError(\"Should not have entered _parse_quoted_key()\")\n\n self.inc()\n self.mark()\n\n while self._current != quote_style and self.inc():\n pass\n\n key = self.extract()\n self.inc()\n\n return Key(key, key_type, \"\")", "def read_from(cls, s, taproot: bool = False):\n first = s.read(1)\n origin = None\n if first == b\"[\":\n prefix, char = read_until(s, b\"]\")\n if char != b\"]\":\n raise ArgumentError(\"Invalid key - missing ]\")\n origin = KeyOrigin.from_string(prefix.decode())\n else:\n s.seek(-1, 1)\n k, char = read_until(s, b\",)/\")\n der = b\"\"\n # there is a following derivation\n if char == b\"/\":\n der, char = read_until(s, b\"<{,)\")\n # legacy branches: {a,b,c...}\n if char == b\"{\":\n der += b\"{\"\n branch, char = read_until(s, b\"}\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing }\")\n der += branch + b\"}\"\n rest, char = read_until(s, b\",)\")\n der += rest\n # multipart descriptor: <a;b;c;...>\n elif char == b\"<\":\n der += b\"<\"\n branch, char = read_until(s, b\">\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing >\")\n der += branch + b\">\"\n rest, char = read_until(s, b\",)\")\n der += rest\n if char is not None:\n s.seek(-1, 1)\n # parse key\n k, xonly_repr = cls.parse_key(k, taproot)\n # parse derivation\n allow_hardened = isinstance(k, bip32.HDKey) and isinstance(k.key, ec.PrivateKey)\n derivation = AllowedDerivation.from_string(\n der.decode(), allow_hardened=allow_hardened\n )\n return cls(k, origin, derivation, taproot, xonly_repr)", "def decode_key(key):\n if '-tags=' in key:\n key_name, tags_json = key.split('-tags=')\n return key_name, json.loads(tags_json)\n return key, None", "def parse_dot_key(data, key):\n for key_part in key.split('.'):\n data = data.get(key_part)\n if data is None:\n break\n return data", "def deserialize_key(key: str):\n try:\n lit = ast.literal_eval(key)\n if isinstance(lit, Hashable):\n key = lit\n except ValueError:\n pass\n return key", "def deserialize_key(key: bytes) -> str:\n return key.decode()", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = 
len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def test_unpack_2(self):\n key = ('item name', str, r'name')\n assert lws.parse_schema_key(key) == (str, 'name', '')", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def readKey(self, keyPath):\n\t\ttry:", "def test_unpack_3(self):\n key = ('item name', str)\n assert lws.parse_schema_key(key) == (str, '.*', '')", "def loader_from_key(key):\n\n if \":\" in key:\n return key.split(\":\")\n return key, None", "def load_key():", "def _get_raw_key(self, key_id):", "def read_public_key(f: IO[str]) -> Tuple[str, str, str, str]:\n data = f.read()\n try:\n kind, key, comment = data.split(\" \")\n if kind.startswith(\"ssh-\") and comment:\n base64.b64decode(key)\n return (kind, key, comment, data)\n except ValueError:\n pass\n\n raise click.ClickException(\"{} is not a valid SSH key\".format(f.name))", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def test__parse_key_unquoted(value, position, expected_output, expected_position):\n state = ParserState(value)\n state.position = position\n \n output = parse_key_unquoted(state)\n vampytest.assert_instance(output, tuple)\n vampytest.assert_eq(output, expected_output)\n vampytest.assert_eq(state.position, expected_position)", "def read_raw(self, key):\n value = None\n if key is not None:\n value = self.tcex.key_value_store.read(self._context, key.strip())\n else:\n self.log.warning('The key field was None.')\n return value", "def parse(self, whole_api_key: str):\n\n if StringExtension.is_none_or_white_space(whole_api_key):\n return ApiKeyParseResult.InvalidEmptyOrWhitespace\n\n if len(whole_api_key) != 61:\n return ApiKeyParseResult.InvalidKeyLength\n\n if whole_api_key.index('.') == -1:\n return ApiKeyParseResult.InvalidKeyFormat\n\n public_part_end = whole_api_key[0:50].index('.')\n if public_part_end == -1:\n return ApiKeyParseResult.InvalidUnableToExtractPublicPart\n\n public_part = whole_api_key[0:public_part_end]\n if len(public_part) != 20:\n return ApiKeyParseResult.InvalidPublicPartLength\n\n if len(whole_api_key) <= public_part_end + 1:\n return ApiKeyParseResult.InvalidUnableToExtractSecretPart\n\n private_part = whole_api_key[public_part_end + 1:len(whole_api_key)]\n if len(private_part) != 40:\n return ApiKeyParseResult.InvalidSecretPartLength\n\n return ApiKeyParseResult.Success", "def Read(key):\n mac = json.loads(key)\n return HmacKey(mac['hmacKeyString'], mac['size'])", "def __parse_key(self, key_str):\n key_type = key_str[0]\n full_column, value = key_str[1:].split(COL_SEPARATOR,1)\n table_name, column_name = full_column.split('.')\n if key_type == NUMBER_KEY_TYPE:\n value = int(value)\n elif key_type == DATE_KEY_TYPE:\n t_data = time.strptime(value, '%Y-%m-%d')\n value = datetime.date(t_data[0], t_data[1], t_data[2])\n\n return table_name, column_name, value", "def parse_dict(self, data):\n self.public_key = VerifyingKey(data['public_key'])\n self.signature = base58.b58decode(data['signature']) if data['signature'] 
else None", "def Read(key):\n mac = json.loads(key)\n return HmacKey(mac['hmacKeyString'], mac['size'])", "def Read(key):\n aes = json.loads(key)\n hmac_val = aes['hmacKey']\n return AesKey(aes['aesKeyString'],\n HmacKey(hmac_val['hmacKeyString'], hmac_val['size']),\n aes['size'], keyinfo.GetMode(aes['mode']))", "def parse_tag_key_value(key_value: str, value_required=True) -> Tuple[str, Any]:\n if not key_value:\n raise ValueError(\"key must be specified.\")\n\n if \"=\" not in key_value:\n if value_required:\n raise ValueError(f\"key=value pair expected: '{key_value}'\")\n return (key_value, ANY_VALUE)\n\n key, value = key_value.split(\"=\", 1)\n if not key:\n raise ValueError(f\"key must be specified: '{key_value}'\")\n return (key, parse_tag_value(value))", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def _sloppy_parse_user_and_api_data (self, key, contents):\n key_start = contents.find(key + '\"')\n if int(key_start) == -1:\n return None\n sub_contents = contents[int(key_start):]\n l = sub_contents.find('\",')\n return contents[(int(key_start)+len(key)+3):int(key_start)+l].decode('string_escape')", "def decode_key(key: str) -> Tuple[int, int]:\n try:\n mod, exp = key.split(\".\")\n except ValueError:\n raise ValueError(f\"`{key}` is not a valid key\")\n\n return (\n int.from_bytes(base64.urlsafe_b64decode(mod), config.BYTEORDER),\n int.from_bytes(base64.urlsafe_b64decode(exp), config.BYTEORDER, signed=True),\n )", "def _parse_raw_header_entries(header_entries):\n\n def __check_key(key):\n return not(\"_\" in key or \" \" in key or \":\" in key or not len(key))\n\n result = {}\n if (len(header_entries) < 1):\n return result\n\n # Remove leading '--'\n header_entries = header_entries[1:]\n if (not len(header_entries) % 2 == 0):\n raise ValueError(\"last key does not have a value\")\n\n while (len(header_entries)):\n # Retrieve raw key\n logging.debug(\"current header content \" + str(header_entries))\n word = header_entries[0]\n header_entries = header_entries[1:]\n\n # Try to trim equal\n if (word[-1] == ':'):\n word = word[:-1]\n\n if(not __check_key(word)):\n raise ValueError(\"invalid key '{}' in key value list\".format(word))\n\n result[word] = header_entries[0]\n header_entries = header_entries[1:]\n\n return result", "def Read(key):\n aes = json.loads(key)\n hmac = aes['hmacKey']\n return AesKey(aes['aesKeyString'],\n HmacKey(hmac['hmacKeyString'], hmac['size']),\n aes['size'], keyinfo.GetMode(aes['mode']))", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def from_b58check(key):\n return HDKey.from_bytes(base58.b58decode_check(key))[0]", "def as_key(key):\n return key.lstrip('/').rstrip('/')", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "async def read(self, key: str) -> ResponseOrKey:", "def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def parse_key_string(give_dict):\n key_string = ''\n for name in give_dict:\n if name == 'announced-date' or name == 'currency' or name == 'price':\n continue\n name_value = give_dict[name].lower()\n name_value = ' '.join(name_value.split())\n key_string = \"%s %s\" % (key_string, name_value)\n # To simplify the matching script:\n # the first and last character of the key has to be a whitespace\n return \"%s \" % key_string", "def resolve_key(obj, _):\n return obj.key.decode()", "def 
_decode_key(self, key):\n if hasattr(key, \"char\"):\n return str(key.char).lower()\n elif hasattr(key, \"name\"):\n return str(key.name).lower()", "def _get_key_from_file(file_contents, key):\n r = re.compile('^{}\\=[\\'\\\"]*([^\\'\\\"\\n]*)'.format(key), re.MULTILINE)\n match = r.search(file_contents)\n if match:\n return match.group(1)\n else:\n return ''", "def GetKeyByPath(self, key_path):", "def parse(self, key: str):\n redis_type = self.client.type(key).decode('utf-8')\n key_type = key\n if \":\" in key:\n key_type = key.split(\":\")[1]\n if redis_type == 'hash':\n deserializer = self.STATE_DESERIALIZERS.get(key_type)\n if not deserializer:\n raise AttributeError(NO_DESERIAL_MSG.format(key_type))\n self._parse_hash_type(deserializer, key)\n elif redis_type == 'set':\n deserializer = self.STATE_DESERIALIZERS.get(key_type)\n if not deserializer:\n raise AttributeError(NO_DESERIAL_MSG.format(key_type))\n self._parse_set_type(deserializer, key)\n else:\n value = self.client.get(key)\n # Try parsing as json first, if there's decoding error, parse proto\n try:\n self._parse_state_json(value)\n except (UnicodeDecodeError, JSONDecodeError, AttributeError):\n self._parse_state_proto(key_type, value)", "def __kv_pair(line):\n\n splitline = line.split(\"=\")\n\n if len(splitline) <= 1:\n return None, None\n\n key = splitline[0].strip()\n\n val = \"=\".join(splitline[1:]).strip()\n\n return key, val", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def __init__(self, key: str) -> None:\n self.key = _parse_url(key) or key", "def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)", "def _check_key(key): # type: (str) -> None\n if not key:\n raise ValueError('Key must not be empty.')\n if '.' 
in key:\n raise ValueError('Key must not contain dots.')", "def key(key):\n return key", "def parseLine(line):\n\n\teq = line.find('=')\n\tif eq == -1: raise Exception()\n\tkey = line[:eq].strip()\n\tvalue = line[eq+1:-1].strip()\n\treturn key, parseValue(value)", "def prepare_key(self, key):\n return smart_str(key)", "def handle_key(self, key):\n pass", "def _InitFromString(self, text):\r\n # First, remove all whitespace:\r\n text = re.sub(_WHITESPACE_RE, '', text)\r\n\r\n # Parse out the period-separated components\r\n match = _KEY_RE.match(text)\r\n if not match:\r\n raise ValueError('Badly formatted key string: \"%s\"', text)\r\n\r\n private_exp = match.group('private_exp')\r\n if private_exp:\r\n private_exp = _B64ToNum(private_exp)\r\n else:\r\n private_exp = None\r\n self.keypair = Crypto.PublicKey.RSA.construct(\r\n (_B64ToNum(match.group('mod')),\r\n _B64ToNum(match.group('exp')),\r\n private_exp))", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16), int(key[2] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def _clean_key_type(key_name, escape_char=ESCAPE_SEQ):\n\n for i in (2, 1):\n\n if len(key_name) < i:\n return None, key_name\n\n type_v = key_name[-i:]\n\n if type_v in _KEY_SPLIT:\n if len(key_name) <= i:\n return _KEY_SPLIT[type_v], ''\n\n esc_cnt = 0\n for pos in range(-i - 1, -len(key_name) - 1, -1):\n if key_name[pos] == escape_char:\n esc_cnt += 1\n else:\n break\n\n if esc_cnt % 2 == 0:\n return _KEY_SPLIT[type_v], key_name[:-i]\n else:\n return None, key_name\n\n return None, key_name", "def is_key(line: str) -> bool:\n return line.count('=') > 0", "def get_key_name(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[:line.find('=')]", "def get_key_value(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[line.find('=') + 1:]", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def is_key_string(string):\r\n return len(string) > 1 and string[0] == '_'", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def getkey(attrstr, paths=None, prompt=True, promptpass=False):\n paths = paths or DEFAULT_PATHS\n for path in paths:\n filepath = os.path.expanduser(path)\n if not os.path.exists(filepath):\n continue\n with open(filepath, 'r') as handle:\n value = rget(json.load(handle), attrstr)\n if value is None:\n continue\n if isinstance(value, dict):\n raise Exception(f'Ambiguous key: {attrstr}')\n if isinstance(value, list):\n return value\n if not isinstance(value, str):\n return value\n if not value.startswith('b64:'):\n return value\n return b64decode(value[4:]).decode('utf8')\n promptfunc = getpass if promptpass else input\n if prompt:\n return promptfunc(f'Enter {attrstr}: ')\n pathstr = '\\n' + '\\n'.join(paths)\n raise Exception(f'Key not found: {attrstr}{pathstr}')", "def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if 
re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type is None or key_type == 'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')", "def get_key_from_line(line):\n if line.find(\"#\") != 0 or line.find(\"!\") != 0:\n index_key_end = line.find(\"=\")\n while (index_key_end > 0) and (line[index_key_end - 1] == \"\\\\\"):\n index_key_end = line.find(\"=\", index_key_end + 1)\n if index_key_end > 0:\n return line[0:index_key_end].strip()\n return None", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))", "def read_key_str(op, key, maxlen=None, fmt=None, allow_blank=False):\n if key not in op:\n return None\n assert isinstance(op[key], str), 'key `%s` was not str' % key\n assert allow_blank or op[key], 'key `%s` was blank' % key\n assert op[key] == op[key].strip(), 'invalid padding: %s' % key\n assert not maxlen or len(op[key]) <= maxlen, 'exceeds max len: %s' % key\n\n if fmt == 'hex':\n assert re.match(r'^#[0-9a-f]{6}$', op[key]), 'invalid HEX: %s' % key\n elif fmt == 'lang':\n assert op[key] in LANGS, 'invalid lang: %s' % key\n else:\n assert fmt is None, 'invalid fmt: %s' % fmt\n\n return op[key]", "def get_key(command):\n return command.split(\" \")[1]", "async def parse(self, raw: str) -> dict:", "def load_key(self, type, keyid):\n pass", "def read_keyname(self):\n self.show(f'cat {self.keyname_file}')\n with open(self.keyname_file) as f:\n keyname = f.readline().strip()\n self.report('Using key:', keyname)\n return keyname", "def _parse_spec(self):\n\n key, value = self._lines.current.split(':', 1)\n key, value = key.strip(), value.strip()\n value = int(value) if key in self._INT_PROPERTIES else value\n\n try:\n next(self._lines)\n except StopIteration:\n pass\n\n return {key: value}", "def _parse_spec(self):\n\n key, value = self._lines.current.split(':', 1)\n key, value = key.strip(), value.strip()\n value = int(value) if key in self._INT_PROPERTIES else value\n\n try:\n next(self._lines)\n except StopIteration:\n pass\n\n return {key: value}", "def key(name):\n return (\n Literal(name) ^\n (sep('\\'') + Literal(name) + sep('\\'')) ^\n (sep('\"') + Literal(name) + sep('\"')))", "def parse_public_key_line_pattern(line):\n for pattern in RE_PUBLIC_KEY_PATTERNS:\n m = pattern.match(line)\n if m:\n return m.groupdict()\n return None", "def _get_raw_key(args, key_field_name):\n flag_key = getattr(args, key_field_name, None)\n if flag_key is not None:\n return flag_key\n return _read_key_store_file().get(key_field_name)", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def getKeyFromString(self, key_str, errors=None, add_default=True):\n if key_str:\n comps = key_str.split('.')\n new_key = self\n for key in comps:\n if hasattr(new_key, key):\n new_key=getattr(new_key, key)\n elif isinstance(errors, Errors):\n errors.Add(ErrMsg.Error.Validation.Invalidkey, key)\n return None\n else:\n return None\n\n # if key_str is only a partial keychain, add default keys to complete it.\n if len(comps) < 3 and add_default:\n return self._defaultKeyChain(new_key, errors)\n else:\n return new_key\n else:\n return self", "def load_key(self, key):\n self.key = key", "def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)", "def isValidKey(key):\n return True", "def load(self, key: str) -> str:\n pass", "def read_key(stub, key):\n try:\n response = stub.Read(keyval_pb2.ReadRequest(key=key))\n print(\"Read result:\")\n print_response(response)\n except grpc.RpcError as exception:\n print_response(exception)", "def __compound_key(key):\n x_int = int(key[0])\n y_int = int(key[1])\n zeros = len(str(y_int))\n key = x_int * (10 ** zeros) + y_int\n\n return key", "def key_value_pair(line):\n key = None\n value = None\n try:\n key, value = line.split(\"=\", 1)\n except ValueError:\n print(\"line must be format: key=value, but now is:\", line)\n sys.exit(1)\n try:\n value = int(value)\n except ValueError:\n print(\"Error: you input value must be integer, but now is:\", 
value)\n sys.exit(1)\n return key, value", "def _validKey(entry):\n # be forward compatible to zope3 contained objects\n raw_id = getattr(entry, '__name__', '')\n if not raw_id:\n raw_id = entry.getId()\n\n # This substitution is based on the description of cite key restrictions at\n # http://bibdesk.sourceforge.net/manual/BibDesk%20Help_2.html\n return VALIDIDPAT.sub('', raw_id)", "def _GetCompleteKeyOrError(arg):\n if isinstance(arg, Key):\n key = arg\n elif isinstance(arg, basestring):\n key = Key(arg)\n elif isinstance(arg, Entity):\n key = arg.key()\n elif not isinstance(arg, Key):\n raise datastore_errors.BadArgumentError(\n 'Expects argument to be an Entity or Key; received %s (a %s).' %\n (arg, typename(arg)))\n assert isinstance(key, Key)\n\n if not key.has_id_or_name():\n raise datastore_errors.BadKeyError('Key %r is not complete.' % key)\n\n return key", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def import_key(key: str) -> RSA.RsaKey:\n\n return RSA.importKey(binascii.unhexlify(key))", "def tokenize_key_value_pair(kv_pair):\n key, value = kv_pair.strip().split('\\t')\n key = tuple(key.strip().split())\n value = tuple(value.strip().split())\n return (key, value)", "def read_bing_key():\n # See Python Anti-Patterns - it's an awesome resource!\n # Here we are 
using \"with\" when opening documents.\n # http://docs.quantifiedcode.com/python-anti-patterns/maintainability/\n bing_api_key = None\n\n try:\n with open('bing.key','r') as f:\n bing_api_key = f.readline().rstrip()\n except:\n raise IOError('bing.key file not found')\n\n return bing_api_key", "def load_key():\n return open(\"pass.key\", \"rb\").read()", "def _extract_bucket_key(s3_uri: str)->tuple:\n s3_regex=\"^s3://([a-z0-9.-]+)/(.*)$\"\n search =re.search(s3_regex, s3_uri)\n if search is None:\n raise Error(\"Invalid s3 uri: {}\".format(s3_uri))\n return search.groups()", "def parse_authenticator_data(val: bytes) -> AuthenticatorData:\n # Don't bother parsing if there aren't enough bytes for at least:\n # - rpIdHash (32 bytes)\n # - flags (1 byte)\n # - signCount (4 bytes)\n if len(val) < 37:\n raise InvalidAuthenticatorDataStructure(\n f\"Authenticator data was {len(val)} bytes, expected at least 37 bytes\"\n )\n\n pointer = 0\n\n rp_id_hash = val[pointer:32]\n pointer += 32\n\n # Cast byte to ordinal so we can use bitwise operators on it\n flags_bytes = ord(val[pointer : pointer + 1])\n pointer += 1\n\n sign_count = val[pointer : pointer + 4]\n pointer += 4\n\n # Parse flags\n flags = AuthenticatorDataFlags(\n up=flags_bytes & (1 << 0) != 0,\n uv=flags_bytes & (1 << 2) != 0,\n be=flags_bytes & (1 << 3) != 0,\n bs=flags_bytes & (1 << 4) != 0,\n at=flags_bytes & (1 << 6) != 0,\n ed=flags_bytes & (1 << 7) != 0,\n )\n\n # The value to return\n authenticator_data = AuthenticatorData(\n rp_id_hash=rp_id_hash,\n flags=flags,\n sign_count=int.from_bytes(sign_count, \"big\"),\n )\n\n # Parse AttestedCredentialData if present\n if flags.at is True:\n aaguid = val[pointer : pointer + 16]\n pointer += 16\n\n credential_id_len = int.from_bytes(val[pointer : pointer + 2], \"big\")\n pointer += 2\n\n credential_id = val[pointer : pointer + credential_id_len]\n pointer += credential_id_len\n\n \"\"\"\n Some authenticators incorrectly compose authData when using EdDSA for their public keys.\n A CBOR \"Map of 3 items\" (0xA3) should be \"Map of 4 items\" (0xA4), and if we manually adjust\n the single byte there's a good chance the authData can be correctly parsed. Let's try to\n detect when this happens and gracefully handle it.\n \"\"\"\n # Decodes to `{1: \"OKP\", 3: -8, -1: \"Ed25519\"}` (it's missing key -2 a.k.a. 
COSEKey.X)\n bad_eddsa_cbor = bytearray.fromhex(\"a301634f4b500327206745643235353139\")\n # If we find the bytes here then let's fix the bad data\n if val[pointer : pointer + len(bad_eddsa_cbor)] == bad_eddsa_cbor:\n # Make a mutable copy of the bytes...\n _val = bytearray(val)\n # ...Fix the bad byte...\n _val[pointer] = 0xA4\n # ...Then replace `val` with the fixed bytes\n val = bytes(_val)\n\n # Load the next CBOR-encoded value\n credential_public_key = cbor2.loads(val[pointer:])\n credential_public_key_bytes = cbor2.dumps(credential_public_key)\n pointer += len(credential_public_key_bytes)\n\n attested_cred_data = AttestedCredentialData(\n aaguid=aaguid,\n credential_id=credential_id,\n credential_public_key=credential_public_key_bytes,\n )\n authenticator_data.attested_credential_data = attested_cred_data\n\n if flags.ed is True:\n extension_object = cbor2.loads(val[pointer:])\n extension_bytes = cbor2.dumps(extension_object)\n pointer += len(extension_bytes)\n authenticator_data.extensions = extension_bytes\n\n # We should have parsed all authenticator data by this point\n if (len(val) > pointer):\n raise InvalidAuthenticatorDataStructure(\n \"Leftover bytes detected while parsing authenticator data\"\n )\n\n return authenticator_data", "def string_to_keypair(self, data): \n return keypair_lst", "def isItBlankOrNot(stupidkeyval):\n # Annoying magic, but there's no easy way to deal with\n # completely blank/missing values so we do what we can\n result = stupidkeyval.split(':')\n if len(result) == 1:\n # Can we even get here? Not in any good way\n result = 'Undefined'\n elif len(result) == 2:\n # Expected entry point\n # Check the place where we expect to find the obsplan.\n # If it's blank, put *something* in it.\n if result[1].strip() == '':\n result = 'Undefined'\n else:\n result = result[1].strip()\n elif result is None:\n result = 'Undefined'\n\n return result", "def _GetKeyString(self):" ]
[ "0.76626766", "0.6655824", "0.6617215", "0.6501489", "0.6450143", "0.6249241", "0.6145767", "0.5899227", "0.5899173", "0.5889372", "0.5886852", "0.58818334", "0.5807456", "0.58011734", "0.5785233", "0.5777432", "0.57585365", "0.57519037", "0.5722677", "0.5717174", "0.57077587", "0.5704716", "0.5698656", "0.56439745", "0.56374884", "0.5634988", "0.56306034", "0.56060624", "0.55468535", "0.5540998", "0.55399925", "0.5538889", "0.55029273", "0.5498555", "0.5480745", "0.5473044", "0.5469273", "0.5441158", "0.54309684", "0.54030627", "0.53937405", "0.53924763", "0.535583", "0.53465897", "0.5304046", "0.5301616", "0.5296638", "0.5295501", "0.5285356", "0.52752876", "0.52682203", "0.52220464", "0.52170837", "0.51852286", "0.5181231", "0.518114", "0.5174792", "0.51738894", "0.5160799", "0.51594967", "0.5145977", "0.5144503", "0.5130401", "0.51292235", "0.5123295", "0.5123194", "0.511594", "0.5096019", "0.5094655", "0.5091956", "0.5081644", "0.5080709", "0.5059331", "0.50587296", "0.50547665", "0.50547665", "0.5050608", "0.5049826", "0.5049628", "0.5035294", "0.50348526", "0.5016752", "0.5012218", "0.50014836", "0.4996589", "0.4982448", "0.497993", "0.49657807", "0.49510756", "0.49506527", "0.4946481", "0.49399808", "0.49343005", "0.4934067", "0.49319187", "0.49189028", "0.49162018", "0.49125114", "0.4905108", "0.49033967" ]
0.810298
0
Attempts to parse a value at the current position.
def _parse_value(self): # type: () -> Item self.mark() trivia = Trivia() c = self._current if c == '"': return self._parse_basic_string() elif c == "'": return self._parse_literal_string() elif c == "t" and self._src[self._idx :].startswith("true"): # Boolean: true self.inc_n(4) return Bool(True, trivia) elif c == "f" and self._src[self._idx :].startswith("false"): # Boolean: true self.inc_n(5) return Bool(False, trivia) elif c == "[": # Array elems = [] # type: List[Item] self.inc() while self._current != "]": self.mark() while self._current.is_ws() or self._current == ",": self.inc() if self._idx != self._marker: elems.append(Whitespace(self.extract())) if self._current == "]": break if self._current == "#": cws, comment, trail = self._parse_comment_trail() next_ = Comment(Trivia("", cws, comment, trail)) else: next_ = self._parse_value() elems.append(next_) self.inc() res = Array(elems, trivia) if res.is_homogeneous(): return res raise self.parse_error(MixedArrayTypesError) elif c == "{": # Inline table elems = Container() self.inc() while self._current != "}": if self._current.is_ws() or self._current == ",": self.inc() continue key, val = self._parse_key_value(False) elems.append(key, val) self.inc() return InlineTable(elems, trivia) elif c in string.digits + "+" + "-": # Integer, Float, Date, Time or DateTime while self._current not in " \t\n\r#,]}" and self.inc(): pass raw = self.extract() item = self._parse_number(raw, trivia) if item: return item try: res = parse_rfc3339(raw) except ValueError: res = None if res is None: raise self.parse_error(InvalidNumberOrDateError) if isinstance(res, datetime.datetime): return DateTime(res, trivia, raw) elif isinstance(res, datetime.time): return Time(res, trivia, raw) elif isinstance(res, datetime.date): return Date(res, trivia, raw) else: raise self.parse_error(InvalidNumberOrDateError) else: raise self.parse_error(UnexpectedCharError, (c))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, value):\n raise NotImplementedError(\"Please implement the Class\")", "def parse_value(cls, value):\n return value", "def parse_value(cls, value):\n return value", "def parse_value(cls, value):\n raise NotImplementedError(\"subclass must implement parse_value()\")", "def parse_and_set_value(self, new_value_str):\n self.value = self.parse(new_value_str)", "def value(self) -> ParsedT:\n ...", "def Parse(self, argument):\n self._value = argument", "def parse(value):\n return int(value)", "def parseValue(self, value):\n if self.isNumericVector():\n return map(self._pythonType, value.split(','))\n if self.typ == 'boolean':\n return _parseBool(value)\n return self._pythonType(value)", "def __parse(self) -> object:\r\n char = self.data[self.idx: self.idx + 1]\r\n if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:\r\n str_len = int(self.__read_to(b':'))\r\n return self.__read(str_len)\r\n elif char == b'i':\r\n self.idx += 1\r\n return int(self.__read_to(b'e'))\r\n elif char == b'd':\r\n return self.__parse_dict()\r\n elif char == b'l':\r\n return self.__parse_list()\r\n elif char == b'':\r\n raise DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))\r\n else:\r\n raise DecodingError('Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))", "def _parse_value(self, write_token=True, override=None):\n v_str = self.prior_token\n\n # Construct the complex string\n if v_str == '(':\n v_re = self.token\n\n self._update_tokens(write_token)\n assert self.token == ','\n\n self._update_tokens(write_token)\n v_im = self.token\n\n self._update_tokens(write_token)\n assert self.token == ')'\n\n self._update_tokens(write_token, override)\n v_str = '({0}, {1})'.format(v_re, v_im)\n\n recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]\n\n for f90type in recast_funcs:\n try:\n # Unclever hack.. 
integrate this better\n if f90type == pybool:\n value = pybool(v_str, self.strict_logical)\n else:\n value = f90type(v_str)\n return value\n except ValueError:\n continue", "def parse(self, argument):\n self._value = argument", "def test__parse_next(value, position, expected_output, expected_position):\n state = ParserState(value)\n state.position = position\n \n output = parse_next(state)\n vampytest.assert_instance(output, tuple)\n vampytest.assert_eq(output, expected_output)\n vampytest.assert_eq(state.position, expected_position)", "def parser(self, value: Optional[Callable[[Mapping], Mapping]]) -> None:\n self._parse = value", "def read_value(stream):\n strip_whitespace(stream)\n\n if stream.eof():\n raise VeryUnexpectedEndException(stream, \"Encountered EOF while scanning for a value\")\n\n char = stream.peek()\n if '\"' == char:\n value = read_string(stream)\n value_type = SUCH_STRING\n\n elif char in VALID_TOKEN_CHARS:\n value = read_token(stream)\n value_type = SUCH_CONST\n if \"yes\" == value:\n value = True\n elif \"no\" == value:\n value = False\n elif \"empty\" == value:\n value = None\n else:\n # It's a token Bob!\n value_type = SUCH_TOKEN\n\n elif char in \"1234567890\":\n value = read_number(stream)\n value_type = SUCH_NUMBER\n\n else:\n raise ManyParseException(stream, \"Invalid value start character: {!r}\".format(char))\n\n return value, value_type", "def parse_value(tokens: deque) -> JSON:\n tk = tokens[0]\n\n if tk == \"[\":\n return parse_list(tokens)\n elif tk.type == \"NUMBER\":\n tokens.popleft() # É necessário consumir o 1o token\n return float(tk)\n \n # Complete com as outras regras de objeto, STRING, BOOL e NULL\n # ...\n else:\n raise SyntaxError(\"token inesperada em lista: %r\" % tk)", "def value_at( self, position ):\n # TODO: check if position falls within value\n raise NotImplementedError(\"valueAt\")", "def value_from_str(self, s):\n raise ValueError()", "def _parse_value(self, data, flags):\n\n\t\tif flags & Client._FLAG_COMPRESSED:\n\t\t\tdata = decompress(data)\n\n\t\tif flags == 0 or flags == Client._FLAG_COMPRESSED:\n\t\t\t# Either a bare string or a compressed string now decompressed...\n\t\t\tvalue = data\n\t\telif flags & Client._FLAG_INTEGER:\n\t\t\tvalue = int(data)\n\t\telif flags & Client._FLAG_LONG:\n\t\t\tvalue = long(data)\n\t\telif flags & Client._FLAG_PICKLE:\n\t\t\ttry:\n\t\t\t\tvalue = pickle.loads(data)\n\t\t\texcept Exception:\n\t\t\t\tself._debuglog('Pickle error...\\n')\n\t\t\t\tvalue = None\n\t\telse:\n\t\t\tself._debuglog(\"unknown flags on get: %x\\n\" % flags)\n\n\t\treturn value", "def maybe_advance(self, expected_type):\n token = self._get_token()\n if token and token.type == expected_type:\n self.pos = token.pos\n return token.value\n return None", "def parse_value(self) -> SyntaxNode:\n if self.current.tok_type == \")\":\n self.consume_if(\")\")\n node = self.parse_expr()\n self.consume_if(\"(\")\n return node\n token = self.consume_if(\"N\")\n return SyntaxNode(token=token, children=[])", "def _parse_value(value):\n # Check if it is a boolean, int, or float value\n try:\n value = json.loads(value.lower())\n return value\n except ValueError:\n return value", "def _value(token):\n result = re.match(r'\\d*', '0' + token)\n return int(result.group(0))", "def VALUE(text):\n # This is not particularly robust, but makes an attempt to handle a number of cases: numbers,\n # including optional comma separators, dates/times, leading dollar-sign.\n if isinstance(text, (numbers.Number, datetime.date)):\n return text\n text = 
text.strip().lstrip('$')\n nocommas = text.replace(',', '')\n if nocommas == \"\":\n return 0\n\n try:\n return int(nocommas)\n except ValueError:\n pass\n\n try:\n return float(nocommas)\n except ValueError:\n pass\n\n try:\n return dateutil.parser.parse(text)\n except ValueError:\n pass\n\n raise ValueError('text cannot be parsed to a number')", "def _parse_field(\n self,\n line: List[str],\n index: int,\n make_invalid_measurement_missing: bool = False,\n ) -> int:\n result = None\n try:\n if line[index]:\n result = int(line[index])\n except (ValueError, IndexError) as ex:\n if not make_invalid_measurement_missing:\n raise ex\n result = None\n return result", "def _parse(value, function, fmt):\n try:\n return function(value)\n except ValueError as e:\n raise_from(ValueError(fmt.format(e)), None)", "def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val", "def _parse(val: str):\n\n if not isinstance(val, str):\n raise TypeError(\"Method requires string input\")\n\n value = re.findall(r'^([-+]?\\d*\\.\\d*(?=\\s)|\\d+(?=\\s))', val)\n if not (value and val[:len(value[0])] == value[0]):\n return val, None\n\n # string starts with value\n value = value[0]\n val = val[len(value):]\n\n val = val.strip()\n if val:\n unit = val\n else:\n unit = 'dimensionless'\n\n return value, unit", "def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value", "def parse_variable_value(self, line):\n tmp = self.parse_metadata_line(line, 'get')\n md_var_names, md_names, md_line = tmp\n\n # Error Checking\n for v_name, m_name in zip(md_var_names, md_names):\n if v_name not in self.variables:\n self.E_str = \"parse_variable_value\"\n self.print_error(f\"Undeclared variable '{v_name}'\")\n\n Var = getattr(self, v_name)\n if m_name not in Var.metadata:\n self.E_str = \"parse_variable_value\"\n self.print_error(f\"Undeclared metadata '{m_name}'\")\n\n # Split any lists up\n str_part, non_str = gen_parse.get_str_between_delims(line)\n words = [md_line]\n parsed_val = False\n if ',' in non_str:\n if '[' in md_line:\n value = parse_nested_lists(md_line)\n parsed_val = True\n\n if not parsed_val:\n # Loop over all values in lists\n values = []\n for word in words:\n # If there is no metadata just set the value\n if len(md_var_names) == 0:\n value = type_check.eval_type(word)\n if type(value) == str:\n value = gen_parse.rm_quotation_marks(value)\n value, _ = self.find_vars_in_str(value)\n\n # Replace all metadata instances with their values\n else:\n # Loop over all metadata\n for var_i, (v_name, m_name) in enumerate(zip(md_var_names,\n md_names)):\n Var = getattr(self, v_name)\n metadata = Var[m_name]\n word = word.replace(f\"METADATA_{var_i}\", str(metadata))\n value = word\n\n if type(value) == str:\n value = type_check.eval_type(value)\n values.append(value)\n\n value = values\n if len(value) == 1:\n value = values[0]\n\n return value", "def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None", "def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value", "def parse(cls, s):\n raise NotImplementedError", "def parse(self,value):\r\n\t\treturn str(value)", "def val_parser(parser, inputstring):\n\n 
inputstring = inputstring.strip()\n\n if float(inputstring) == 9.9e37:\n output = float('inf')\n else:\n output = float(inputstring)\n if parser == int:\n output = parser(output)\n\n return output", "def parse_data_value(self, value):\n #print('parsing: {}'.format(value))\n if len(value) == 0:\n return value\n elif value[0] == '(' and value[-1] == ')':\n newdict = {}\n cur_level = 0\n cur_key = []\n cur_value = []\n cur_inner = []\n state = 0\n first_key_pass = False\n for char in value[1:-1]:\n\n # State 0 - reading key\n if state == 0:\n if char == '=':\n state = 1\n elif first_key_pass and char == ',':\n pass\n else:\n cur_key.append(char)\n first_key_pass = False\n\n # State 1 - reading value\n elif state == 1:\n if char == ',':\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_value))\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n elif char == '(':\n cur_level += 1\n cur_inner.append(char)\n state = 2\n else:\n cur_value.append(char)\n\n # State 2 - Reading first char of an inner paren stanza\n elif state == 2:\n if char == '(':\n newdict[''.join(cur_key)] = []\n state = 4\n at_first = True\n else:\n state = 3\n\n # State 3 - reading a regular inner dict\n if state == 3:\n if char == '(':\n cur_level += 1\n elif char == ')':\n cur_level -= 1\n cur_inner.append(char)\n if cur_level == 0:\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_inner[1:-1]))\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n\n # State 4 - Reading a list\n elif state == 4:\n if char == '(':\n cur_level += 1\n if not at_first:\n cur_inner.append(char)\n elif char == ')':\n cur_level -= 1\n cur_inner.append(char)\n\n if cur_level == 1:\n newdict[''.join(cur_key)].append(self.parse_data_value(''.join(cur_inner)))\n cur_inner = []\n\n elif cur_level == 0:\n cur_key = []\n cur_value = []\n cur_inner = []\n first_key_pass = True\n state = 0\n\n elif cur_level == 1 and char == ',':\n pass\n\n else:\n cur_inner.append(char)\n\n at_first = False\n\n # Clean up, depending on our state\n if state == 0:\n pass\n elif state == 1:\n newdict[''.join(cur_key)] = self.parse_data_value(''.join(cur_value))\n else:\n raise Exception(\"shouldn't be able to get here\")\n\n return newdict\n else:\n\n # Check for quoted values, and don't split commas inside them.\n # Also don't try to parse mismatched quotes. We're just being\n # even more stupid about it and converting commas in quotes to\n # unicode snowmen, temporarily\n new_value = value\n replace_comma = u\"\\u2603\"\n quote_parts = value.split('\"')\n if len(quote_parts) > 1 and len(quote_parts) % 2 == 1:\n new_val_list = []\n for (idx, part) in enumerate(quote_parts):\n if idx % 2 == 1:\n new_val_list.append(part.replace(',', replace_comma))\n else:\n new_val_list.append(part)\n new_value = '\"'.join(new_val_list)\n\n parts = [p.replace(replace_comma, ',') for p in new_value.split(',')]\n if len(parts) == 1:\n # See the comment on the other side of the `if` here. We may have\n # a single-element dict.\n if '=' in value:\n newdict = {}\n (key, val) = value.split('=', 1)\n newdict[key] = val\n return newdict\n else:\n return value\n else:\n # This is hokey, and a byproduct of the stupid way we're parsing\n # this stuff (and is susceptible to corner cases) - anyway, at\n # this point we MAY have a dict, or we may just have a string\n # which happens to have a comma in it. We'll just test the first\n # element and see if there's an equals sign in it. 
If it does,\n # then we'll parse it as a dict. If not, just return as a string.\n if '=' in parts[0]:\n newdict = {}\n for part in parts:\n (key, val) = part.split('=', 1)\n newdict[key] = val\n return newdict\n else:\n return value", "def _check_for_value(self):\n self.node.get_value()", "def __parse_next(self, buffer):\n\t\ttoken = buffer.read(1)\n\t\t\n\t\t_tell = buffer.tell()\n\t\t# Is it an operator?\n\t\tif token == \"/\":\n\t\t\tnum, var = self.__parse_operator(buffer)\n\t\t\tif num is None:\n\t\t\t\tbuffer.seek(_tell - 1)\n\t\t\t\treturn \"$\"\n\t\t\t\n\t\t\tif isinstance(var, str):\n\t\t\t\treturn var\n\t\t\t\n\t\t\tret = (var / num)\n\t\t\tif isinstance(ret, Range):\n\t\t\t\tret = ret.min # XXX is this right?\n\t\t\tif int(ret) != ret:\n\t\t\t\treturn \"%.1f\" % ret\n\t\t\treturn str(int(ret))\n\t\t\n\t\tif token == \"*\":\n\t\t\tnum, var = self.__parse_operator(buffer)\n\t\t\tret = var * num\n\t\t\tif isinstance(ret, float):\n\t\t\t\tret = int(round(ret))\n\t\t\treturn str(ret)\n\t\t\n\t\t# Is it a conditional?\n\t\tif token == \"?\":\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tblocks = self.__parse_conditional(buffer)\n\t\t\t\n\t\t\t# Prepare the condition cache\n\t\t\t# This shouldn't be done here, but anyway...\n\t\t\tfor condition, value in blocks:\n\t\t\t\tcondition.evaluate({})\n\t\t\t\tself.conditions.extend(condition.identifiers)\n\t\t\t\n\t\t\t# blocks is a list of (condition, value) tuples\n\t\t\t# We evaluate the paperdoll against each of them\n\t\t\t# and return when we get a hit\n\t\t\t\n\t\t\tfor condition, value in blocks:\n\t\t\t\tif condition.evaluate(self.paperdoll):\n\t\t\t\t\treturn value\n\t\t\t\n\t\t\treturn\n\t\t\n\t\tif token == \"<\":\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tidentifier = self.__read_block(buffer, startchr=\"<\", endchr=\">\")\n\t\t\ttry:\n\t\t\t\tvalue = self.get_variable(identifier)\n\t\t\t\treturn SpellString(value).format(self.obj, proxy=self.proxy)\n\t\t\texcept VariableNotFound:\n\t\t\t\treturn \"<%s>\" % (identifier)\n\t\t\n\t\tif token == \"{\":\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tblock = self.__read_block(buffer, startchr=\"{\", endchr=\"}\")\n\t\t\t\n\t\t\t# Attempt to read decimals formatting\n\t\t\tdecimals = 0\n\t\t\ttoken = buffer.read(1)\n\t\t\tif token == \".\":\n\t\t\t\tdecimals = self.__read_number(buffer)\n\t\t\telif token:\n\t\t\t\t# Step one char back, only if we are not at the end\n\t\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\t\n\t\t\tblock = SpellString(block).format(self.obj, proxy=self.proxy, braced=True)\n\t\t\ttry: # FIXME\n\t\t\t\tblock = eval(block)\n\t\t\t\tif decimals:\n\t\t\t\t\tblock = round(block, decimals)\n\t\t\t\treturn \"%g\" % (block)\n\t\t\texcept Exception:\n\t\t\t\treturn \"[%s]\" % (block)\n\t\t\n\t\t# At this point, we need to check for functions and variables\n\t\t# but only if we don't already have a digit\n\t\tif not token.isdigit():\n\t\t\t_tell = buffer.tell()\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tidentifier = self.__read_alpha(buffer)\n\t\t\t\n\t\t\tif identifier.lower() in FUNCTIONS:\n\t\t\t\targs = self.__parse_function_args(buffer)\n\t\t\t\treturn self.formatter.format_function(identifier, args)\n\t\t\t\n\t\t\tif identifier.lower() in PAPERDOLL_VALUES:\n\t\t\t\treturn self.formatter.format_paperdoll(identifier)\n\t\t\t\n\t\t\t\n\t\t\t# We didn't find any valid identifier\n\t\t\tif not identifier:\n\t\t\t\treturn \"$\"\n\t\t\t\n\t\t\t# Nothing left to check for but booleans\n\t\t\t# The values get messed with the identifier however, so we need to\n\t\t\t# look at only the first 
char\n\t\t\tif identifier[0] in BOOLEANS:\n\t\t\t\tidentifier = identifier[0]\n\t\t\t\tbuffer.seek(_tell)\n\t\t\t\tvalues = self.__parse_boolean(buffer)\n\t\t\t\treturn self.formatter.format_boolean(token, values)\n\t\t\n\t\t# It's probably a variable then\n\t\tbuffer.seek(-1, SEEK_CUR)\n\t\tspell, identifier, effect = self.__parse_macro(buffer)\n\t\t\n\t\tif identifier:\n\t\t\tspell = int(spell or 0)\n\t\t\teffect = int(effect or 1)\n\t\t\t\n\t\t\tvalue = self.formatter.format_macro(spell, identifier, effect)\n\t\t\tself.formatter.last_value = value\n\t\t\treturn str(value)\n\t\telse:\n\t\t\treturn \"$\"\n\t\t\n\t\tif not token or token.isspace():\n\t\t\treturn token\n\t\t\n\t\treturn token", "def handle_value(value):\n\n if value[-1] == 'x':\n return float(value[0:-1])\n\n if value[-1] == '%':\n return float(value[0:-1])\n\n if value[0].isdigit():\n return bytify(value)\n\n raise ValueError", "def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token", "def extractVal(value):\n assert value is not None, \"Value is None\"\n \n trimmed = value.strip()\n try:\n return int(trimmed)\n except ValueError:\n try:\n return float(trimmed)\n except ValueError:\n return str(trimmed)", "def parseString(self, s):\n pass", "def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)", "def _parse(self, val):\n if self.type == \"integer\":\n return int(val)\n elif self.type == \"number\":\n return float(val)\n elif self.type == \"boolean\":\n lower_val = str(val).lower()\n if lower_val not in {\"true\", \"false\"}:\n msg = \"Boolean parameter '{}' only accept True/False, got {}.\"\n raise ValidationException(\n message=msg.format(self.name, val),\n no_personal_data_message=msg.format(\"[self.name]\", \"[val]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return True if lower_val == \"true\" else False\n return val", "def peg_value(peg):\n if peg in (None, ''):\n return None\n try:\n val = int(peg)\n except ValueError:\n raise ValueError('peg revision must be a number >= 1 (%(peg)r)'\n % locals())\n else:\n if val <= 0:\n raise ValueError('peg revision needs to be >= 1 (%(val)r)'\n % locals())\n return val", "def parse_numeric(val):\n try: return int(val)\n except: pass\n\n try: return float(val)\n except: pass\n\n return val", "def parseValue(expr):\n\n\ttry:\n\t\treturn eval(expr)\n\texcept:\n\t\treturn eval(re.sub(\"\\s+\", \",\", expr))\n\telse:\n\t\treturn expr", "def parse(self, val):\n # type: (bytes) -> Any\n return val.decode()", "def n_value(self, token):", "def parse(self):\r\n for key, value in KLVParser(self.value, self.key_length):\r\n try:\r\n self.items[key] = self.parsers[key](value)\r\n except Exception:\r\n None", "def parse_value(cls, value):\n choice, value = value.split('=')\n value = cls.VALUES_MAP[value]\n\n return choice, value", "def try_parse_int(value):\n try:\n return int(value)\n except:\n return 0", "def get_value_from_str(value_str):\n try:\n return gdb.parse_and_eval(value_str)\n except RuntimeError:\n return None", "def parse(\n cls,\n value: str\n ):\n\n if value is None or len(value) == 0:\n raise ValueError(\"provided value may not be None or empty\")\n\n for item in cls:\n if value == item.value:\n # found a matching value\n return item\n\n # Fallback value in case the API adds an enum that 
is not supported\n # by an older version of the SDK\n return cls.Unknown", "def parse_value(self, value):\n\t\t\n\t\tif goodies.is_float(value):\n\t\t\treturn float(value)\n\t\telif goodies.is_int(value):\n\t\t\treturn int(value)\n\t\telif goodies.is_bool(value):\n\t\t\treturn bool(value.capitalize())\n\t\telse:\n\t\t\treturn value", "def _parse_values(self, offset, rules):\n position = offset\n\n # Iterate through the unpacking rules and append the retrieved values with its corresponding\n # particle name\n for key, formatter in rules:\n # Skip over spare values\n if AdcptMWVSParticleKey.SPARE in key:\n position += struct.calcsize(formatter)\n continue\n value = list(struct.unpack_from('<%s' % formatter, self.raw_data, position))\n # Support unpacking single values and lists\n if len(value) == 1:\n value = value[0]\n if AdcptMWVSParticleKey.START_TIME in key:\n timestamp = ((value[0]*100 + value[1]), value[2], value[3], value[4],\n value[5], value[6], value[7], 0, 0)\n log.trace(\"TIMESTAMP: %s\", timestamp)\n elapsed_seconds = calendar.timegm(timestamp)\n self.set_internal_timestamp(unix_time=elapsed_seconds)\n log.trace(\"DATA: %s:%s @ %s\", key, value, position)\n position += struct.calcsize(formatter)\n self.final_result.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: value})", "def _assignValue(value):\n if value == \"\":\n return None\n else:\n return value", "def getCurrentValue(self) -> Optional[int]:\n try:\n return int(self.text())\n except ValueError:\n return None", "def peek(self):\n if self.count() <= 0:\n raise ValueError('Cannot peek at value that does not exist')\n return self.items[1]", "def parse_string(self, data):\n pass", "def evaluate(self) -> celpy.celtypes.Value:\n value = self.visit(self.ast)\n if isinstance(value, CELEvalError):\n raise value\n return cast(celpy.celtypes.Value, value)", "def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n else:\n return data", "def parse(self, input):\n pass", "def parseValue(value, nodata=(\"\", \"Na\", \"NaN\", \"-\", \"--\", \"N/A\")):\n if value is None:\n return None\n if isstring(value) and value in nodata:\n return None\n if isstring(value) and value.startswith(\"(\") and value.endswith(\")\"):\n value = unwrap(value,\"(\",\")\")\n return parseValue(listify(value))\n if isstring(value) and value.startswith(\"[\") and value.endswith(\"]\"):\n value = unwrap(value,\"[\",\"]\")\n return parseValue(listify(value))\n elif isdate(value):\n return parseDate(value)\n elif isdatetime(value):\n return strftime(\"%Y-%m-%d %H:%M:%S\", value)\n elif isint(value):\n return parseInt(value)\n elif isfloat(value):\n return parseFloat(value)\n elif isbool(value):\n return parseBool(value)\n elif isstring(value):\n return value\n elif isarray(value):\n return [parseValue(item) for item in value]\n return value", "def 
parse_float(value):\n try:\n return float(value)\n except (ValueError, TypeError):\n return None", "def parse(self):\n raise NotImplementedError", "def parse_value_ending(self, value: str):\n if len(value) < 1:\n raise Exception(\"Failed to parse the __value.\")\n\n if value.endswith(\"T\"):\n return value[:-1], 12\n if value.endswith(\"G\"):\n return value[:-1], 9\n if value.endswith(\"M\"):\n return value[:-1], 9\n if value.endswith(\"k\") or value.endswith(\"K\"):\n return value[:-1], 3\n if value.endswith(\"m\"):\n return value[:-1], -3\n if value.endswith(\"u\"):\n return value[:-1], -6\n if value.endswith(\"n\"):\n return value[:-1], -9\n if value.endswith(\"p\"):\n return value[:-1], -12\n\n return value, 0", "def _try_index(self, value, index):\n\n try:\n return value[index]\n except TypeError:\n return value", "def eval_atom(parse_result):\r\n # Find first number in the list\r\n result = next(k for k in parse_result if isinstance(k, numbers.Number))\r\n return result", "def parse(token):\n\n pass", "def get_property_value(self, reply):\n self.logger.debug(\"Attempting to parse %s's value\", reply)\n if isinstance(reply, GetPropertyReply):\n if 8 == reply.format:\n if hasattr(reply.value, 'buf'):\n value = str(reply.value.buf())\n else:\n value = ''\n for chunk in reply.value:\n value += chr(chunk)\n self.logger.silly(\"Parsed %s\", value)\n return value\n elif reply.format in (16, 32):\n value = list(\n unpack(\n 'I' * reply.value_len,\n reply.value.buf()\n )\n )\n self.logger.silly(\"Parsed %s\", value)\n return value\n self.logger.warning(\"The reply might not be valid: %s\", reply)\n return None", "def numeric(self):\n is_negative = False\n is_integer = True\n if self.current == b\"+\":\n self.next()\n elif self.current == b\"-\":\n is_negative = True\n self.next()\n ipart, fpart = b'', b''\n\n # read integer part\n while self.is_digit:\n ipart += self.current\n self.next()\n\n # read point if exists\n if self.current == b'.':\n is_integer = False\n self.next()\n while self.is_digit:\n fpart += self.next()\n\n if not ipart and not fpart:\n self.on_parser_error(\"Invalid numeric token\")\n\n if not ipart:\n ipart = b'0'\n if not fpart:\n fpart = b'0'\n\n if is_integer:\n val = int(ipart.decode(DEFAULT_ENCODING))\n else:\n val = Decimal(\"{}.{}\".format(ipart.decode(DEFAULT_ENCODING), fpart.decode(DEFAULT_ENCODING)))\n\n if is_negative:\n val = -val\n\n return val", "def _read(self, valid):\n start = self.pos\n while valid(self.char) and self.pos < self.length:\n self._read_char()\n\n return self.data[start : self.pos]", "def raw_decode(self, s, idx=0):\n try:\n obj, end = self.scan_once(s, idx)\n except StopIteration as err:\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\n return obj, end", "def parse_value(self, value_name, default=None):\n\t\treturn self.cfg_root.find(value_name).text", "def parse_int(value):\n try:\n return int(value)\n except (ValueError, TypeError):\n return None", "def _parse(self):\n pass", "def process_result_value(self, value: str, dialect):\n pb = self._cls()\n pb.ParseFromString(value)\n return pb", "def _par_from_parser(x):\n if not isinstance(x, (numbers.Real, u.Quantity)):\n x = float(x)\n return x", "def parse(self, text):\n node = self.match(text)\n if node is None or node.end - node.start != len(text): # TODO: Why not test just end here? 
Are we going to add a pos kwarg or something?\n # If it was not a complete parse, return None:\n return None\n return node", "def _parse_option_value(line, option_name):\n try:\n option_value = line.split('=')[1].strip()\n except IndexError:\n option_value = ''\n if not option_value:\n raise ValueError(\"No value specified for {} option.\".format(option_name))\n return option_value", "def _parse(self, str_val: str):\n if str_val is None:\n return str_val\n\n if self._enum_class and isinstance(str_val, self._enum_class):\n return str_val # Directly return the enum value if it is the enum.\n\n if str_val not in self._str2enum:\n msg = \"Not a valid enum value: '{}', valid values: {}\"\n raise ValidationException(\n message=msg.format(str_val, \", \".join(self.enum)),\n no_personal_data_message=msg.format(\"[val]\", \"[enum]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return self._str2enum[str_val]", "def parse_value(row):\n value_regex = re.compile(r'-?\\d+\\.\\d+')\n value = value_regex.search(row)\n return value.group()", "def __call__(self, value):\n with tf.name_scope('parser'):\n data = decode(value)\n return self._parse_fn(data)", "def parse(self, data):\n raise NotImplementedError", "def _decode_index_value(self, index, value):\n if index.endswith(\"_int\"):\n return int(value)\n else:\n return value", "def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )", "def set_value(self, pos, fixed_type, value):\n self.seek(pos)\n fixed_type.marshal(self, value)\n self.seek(0, io.SEEK_END)", "def parse_tag_value(value_str: str) -> Any:\n # Empty string is short for None.\n if not value_str:\n return None\n\n # If the first character is a JSON compound type (array, object) or\n # or a string, then parse as normal json.\n if value_str[0] in (\"[\", \"{\", '\"'):\n try:\n return json.loads(value_str)\n except json.JSONDecodeError as error:\n raise ValueError(f\"Invalid tag value: {error}\")\n\n # Try to automatically infer the type of the tag value.\n try:\n return int(value_str)\n except ValueError:\n pass\n\n try:\n return float(value_str)\n except ValueError:\n pass\n\n try:\n return str2literal(value_str)\n except ValueError:\n pass\n\n # Assume string.\n return value_str", "def load_value(text=\"\", allow_space=True):\n if allow_space:\n # if empty string like \"\\t\" or \"\\n\"\n if text != \"\" and text.strip() == \"\":\n return text\n else:\n text = text.strip()\n else:\n text = text.strip()\n\n # Integer and Float\n try:\n return int(text)\n except:\n pass\n\n try:\n return float(text)\n except:\n pass\n\n # String\n if (text.startswith(\"'\") and text.endswith(\"'\") and (\n \"','\" not in text) and (\"', '\" not in text)) \\\n or (text.startswith('\"') and text.endswith('\"') and (\n '\",\"' not in text) and ('\", \"' not in text)):\n return text[1:-1]\n\n # Bool\n if text.lower() in TRUE_MARKUP:\n return True\n\n if text.lower() in FALSE_MARKUP:\n return False\n\n if text.lower() in NONE_MARKUP:\n return None\n\n if \",\" in text:\n if text == \",\":\n return list()\n value = [load_value(s, allow_space=False) for s in text.split(\",\")]\n if is_same_instance(value):\n return value\n else:\n raise ValueError(\"items in list has to be same type!\")\n\n return text", "def process(self, 
value):\n return value", "def peek(self):\n return self.next_val", "def parse_value(v):\n if v.strip():\n return f'{float(v):>{VALUE_SIZE}.{VALUE_SIZE - 7}e}'\n else:\n return VALUE_SIZE * ' '", "def _val(self, value):\n cast_val = self._cast(value)\n nval = cast_val\n\n if not self._validate(nval):\n self._setter_error('is invalid', cast_val)\n nval = self._default\n\n h_ok, nval = self._run_hook(nval)\n if not h_ok:\n self._setter_error('is invalid (hook)', cast_val)\n\n self.__val = nval", "def _decode_value(data):\n\n # since Value messages contain only one value, it didn't seem necessary to\n # represent it as a Python class.\n\n msg = Message(data)\n value = None\n count = 0\n\n for field in msg:\n count += 1\n if field.tag == ValueTags.STRING:\n value = field.as_string()\n\n elif field.tag == ValueTags.FLOAT:\n value = field.as_float()\n\n elif field.tag == ValueTags.DOUBLE:\n value = field.as_double()\n\n elif field.tag == ValueTags.INT64:\n value = field.as_int64()\n\n elif field.tag == ValueTags.UINT64:\n value = field.as_uint64()\n\n elif field.tag == ValueTags.SINT64:\n value = field.as_sint64()\n\n elif field.tag == ValueTags.BOOL:\n value = field.as_int32() != 0\n\n else:\n raise ValueError('Unexpected tag %d while decoding value'\n % (field.tag,))\n\n # the MVT spec says that there should be one and only one field in the\n # Value message, so check for that.\n if value is None:\n raise ValueError('Found no fields when decoding value')\n if count > 1:\n raise ValueError('Found multiple fields when decoding value')\n\n return value", "def scheme_read(src):\n if src.current() is None:\n raise EOFError\n if val == 'nil':\n return nil\n elif val not in DELIMITERS: # ( ) ' .\n return val\n elif val == '(':\n return read_tail(src)\n else:\n raise SyntaxError('unexpected token: {0}'.format(val))", "def _parse(self, val: str):\n if val is None:\n return val\n\n if self._enum_class and isinstance(val, self._enum_class):\n return val # Directly return the enum value if it is the enum.\n\n if val not in self._str2enum:\n msg = \"Not a valid enum value: '{}', valid values: {}\"\n raise ValidationException(\n message=msg.format(val, \", \".join(self.enum)),\n no_personal_data_message=msg.format(\"[val]\", \"[enum]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n error_type=ValidationErrorType.INVALID_VALUE,\n )\n return self._str2enum[val]", "def complete_value(self, value):\n pass", "def get_value(self, character):\n raise NotImplementedError()", "def __getitem__(self, index):\n return self.parses[index]" ]
[ "0.67975473", "0.6596191", "0.6596191", "0.65703046", "0.61353844", "0.60871327", "0.60674334", "0.6023864", "0.59837496", "0.5958098", "0.5956337", "0.59420466", "0.5825742", "0.57203496", "0.5710543", "0.5646205", "0.5597949", "0.5590369", "0.55894625", "0.5586219", "0.55780137", "0.54586154", "0.54463625", "0.5437273", "0.5435871", "0.5425111", "0.5422637", "0.53477913", "0.53329504", "0.5329426", "0.5326709", "0.5321185", "0.5316248", "0.53109396", "0.52976716", "0.52932525", "0.52882415", "0.52838576", "0.52785355", "0.5266505", "0.52465814", "0.5238574", "0.5238081", "0.522246", "0.5218098", "0.52161205", "0.52077717", "0.51970065", "0.51903003", "0.5187523", "0.51581573", "0.51524377", "0.51471734", "0.5146159", "0.51371855", "0.51181424", "0.5116367", "0.50932604", "0.50899565", "0.50858444", "0.5084438", "0.5063277", "0.50566274", "0.50508565", "0.5050111", "0.50438666", "0.50372547", "0.50316536", "0.5028932", "0.50267017", "0.5015583", "0.50130904", "0.5003284", "0.49969488", "0.4992185", "0.4987848", "0.49756357", "0.49726427", "0.49586657", "0.49585125", "0.4958454", "0.4950194", "0.49435332", "0.49418133", "0.4940274", "0.49340618", "0.49124253", "0.4910354", "0.4908819", "0.4902015", "0.49015215", "0.48956737", "0.4893192", "0.48901206", "0.48889804", "0.4888147", "0.4884325", "0.48839924", "0.48823145", "0.48797572" ]
0.6330559
4
Parses a table element.
def _parse_table(self): # type: (Optional[str]) -> Tuple[Key, Item] indent = self.extract() self.inc() # Skip opening bracket is_aot = False if self._current == "[": if not self.inc(): raise self.parse_error(UnexpectedEofError) is_aot = True # Key self.mark() while self._current != "]" and self.inc(): pass name = self.extract() key = Key(name, sep="") self.inc() # Skip closing bracket if is_aot: # TODO: Verify close bracket self.inc() cws, comment, trail = self._parse_comment_trail() result = Null() values = Container() while not self.end(): item = self._parse_item() if item: _key, item = item if not self._merge_ws(item, values): values.append(_key, item) else: if self._current == "[": _, name_next = self._peek_table() if self._is_child(name, name_next): key_next, table_next = self._parse_table() key_next = Key(key_next.key[len(name + ".") :]) values.append(key_next, table_next) # Picking up any sibling while not self.end(): _, name_next = self._peek_table() if not self._is_child(name, name_next): break key_next, table_next = self._parse_table() key_next = Key(key_next.key[len(name + ".") :]) values.append(key_next, table_next) else: table = Table( values, Trivia(indent, cws, comment, trail), is_aot ) result = table if is_aot and ( not self._aot_stack or name != self._aot_stack[-1] ): result = self._parse_aot(table, name) break else: raise self.parse_error( InternalParserError, ("_parse_item() returned None on a non-bracket character."), ) if isinstance(result, Null): result = Table(values, Trivia(indent, cws, comment, trail), is_aot) return key, result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_table(table):\n rows = table.find_all('tr')\n if not rows:\n raise ValueError(\"No rows for table\")\n pages = []\n table_tag = \"<table>\"\n tbl_headers = get_tbl_headers(rows)\n table_tag += \"<tr>\"\n for header in tbl_headers.keys():\n table_tag += conf.ADD_TH_TAG(header)\n table_tag += \"</tr>\"\n for row in rows:\n cols = row.find_all('td')\n if not cols:\n continue\n for page_name in cols[0].find_all('a'):\n if not page_name:\n continue\n pages.append(page_name.text)\n table_tag += '<tr>'\n for header, col in tbl_headers.items():\n try:\n table_tag += f\"<td>{preprocess_data(f'{header} : {cols[col].text}')} \\t</td>\"\n except IndexError:\n pass\n table_tag += '</tr>'\n table_tag += '</table>'\n if conf.DOWNLOAD_IMAGES:\n download_images(pages)\n return table_tag", "def parse_table_in_rows(self, table):\n parsed_table = []\n for tr in table.find_elements_by_tag_name('tr'):\n parsed_table.append(tr)\n return parsed_table", "def parseTable(chart):\n rowelems = chart.find_all('tr')\n rows = [rowelem.find_all('td') for rowelem in rowelems]\n data = [[elem.get_text() for elem in row] for row in rows]\n return(data)", "def _parse_table(res, key_index, value_index):\n data = OrderedDict()\n for sel in res.xpath('//tr'):\n columns = sel.xpath('td')\n if len(columns) == value_index+1:\n key = ''.join(columns[key_index].xpath('.//text()').extract())\n key = base.helpers.slugify(key.strip())\n value = ''.join(columns[value_index].xpath('.//text()').extract())\n value = value.strip()\n if key and value:\n data[key] = value\n return data", "def _parse_table(value):\n lines = value.split('\\n')\n header = None\n rows = []\n\n for l in lines:\n if l.startswith('+-'):\n pass\n elif l.startswith('|'):\n columns = [c.strip() for c in l.split('|')[1:-1]]\n if header is None:\n header = columns\n else:\n row = {}\n for i, c in enumerate(columns):\n if len(header)-1 <= i:\n row[i] = c\n else:\n row[header[i]] = c\n rows.append(row)\n return rows", "def _parse_table(text):\n\n text = str(text)\n try:\n text = text.split(\"<pre>\")[1]\n text = text.split(\"</pre>\")[0]\n text = text.split(\"To save this output\")[0]\n lines = text.split(\"\\n\")\n except Exception as exc:\n raise NNDCRequestError(f\"Unable to parse text:\\n{exc}\\n{text}\")\n table = {}\n headers = None\n for line in lines:\n tokens = line.split(\"\\t\")\n tokens = [t.strip() for t in tokens]\n if len(tokens) <= 1:\n continue\n if headers is None:\n headers = tokens\n headers = _parse_headers(headers)\n for header in headers:\n table[header] = []\n else:\n if len(tokens) != len(headers):\n raise NNDCRequestError(\n \"Too few data in table row\\n\"\n + f' Headers: \"{headers}\"\\n'\n + f' Row: \"{tokens}\"'\n )\n for header, token in zip(headers, tokens):\n table[header].append(token)\n return table", "def parse_key_value_table(table, header_tag):\n for tr in table.find_all(\"tr\"):\n if len(tr.find_all(\"th\")) > 0:\n continue\n kv = tr.find_all(\"td\")\n try:\n assert len(kv) == 2\n except:\n logger.warning(f\"{kv} doesnt have 2 entires, expected key-value pairs\")\n key, value = kv\n # if key looks like a description to a date, or value looks like date\n if key_is_date_like(ss(key)) or (\n not_intish(ss(value)) and is_date_parseable(value)\n ):\n # value is date like\n yield (ss(value), (header_tag, ss(key)))", "def parse_soup(self, table):\n rows = table.find_all('tr')\n list_of_lists = list()\n time = pd.Timestamp('now')\n for row in rows:\n row_list = list()\n row_list.append(time)\n for td in row.find_all('td')[1:]:\n 
row_list.append(td.text)\n if td('a'):\n for a in td('a'):\n if a.get('href'):\n m = re.search('teamId\\=(\\d+)', a.get('href'))\n if m:\n row_list.append(m.group(1))\n list_of_lists.append(row_list)\n return [[y for y in x if y] for x in list_of_lists[3:]]", "def parse_regular_table(table, header_tag):\n headers = [ss(k) for k in table.find_all(\"th\")]\n assert len(headers) > 0\n try:\n date_index = list(map(key_is_date_like, headers)).index(True)\n except ValueError:\n logger.warning(\"Couldnt find date-like key in {}\".format(headers))\n return\n for tr in table.find_all(\"tr\"):\n if len(tr.find_all(\"td\")) == 0 and len(tr.find_all(\"th\")) > 1:\n continue\n if len(tr.find_all('td')) < 2:\n logger.debug(\"Not enough values for row in regular table\")\n continue\n td_text = [ss(k) for k in tr.find_all(\"td\")]\n en_td_text = list(enumerate(td_text))\n\n # split into date and non date columns\n date_info = en_td_text[date_index][1]\n non_date_info = [v for k, v in en_td_text if k != date_index]\n yield (date_info, (header_tag, \"|\".join(non_date_info)))", "def render_table(parser, token):\r\n bits = token.split_contents()\r\n try:\r\n tag, table = bits.pop(0), parser.compile_filter(bits.pop(0))\r\n except ValueError:\r\n raise TemplateSyntaxError(\"'%s' must be given a table or queryset.\"\r\n % bits[0])\r\n template = parser.compile_filter(bits.pop(0)) if bits else None\r\n return RenderTableNode(table, template)", "def table_row(data, tpat=re.compile('(<(?P<sl>/)?(?P<tag>t[rdh]|table)[^>]*>)', re.I)):\n data = re.sub(r\"[\\r\\n]+\", \" \", data)\n data = re.sub(r\"(?i)\\s*<(/?)td[^>]*>\\s*\", r\"<\\1td>\", data)\n data = re.sub(r\"(?i)\\s*<(/?)th[^>]*>\\s*\", r\"<\\1th>\", data)\n data = re.sub(r\"(?i)\\s*<tr[^>]*>\\s*\", \"\\n<tr>\", data)\n data = re.sub(r\"(?i)\\s*</tr[^>]*>\\s*\", \"</tr>\\n\", data)\n data = re.sub(r\"(?i)\\s*<(/?)table[^>]*>\\s*\", r\"\\n<\\1table>\\n\", data)\n return data", "def parse(self, html):\n soup = BeautifulSoup(html)\n\n rankings = []\n tables = soup.findAll('table')\n rankings_table = tables[2]\n\n for tr in rankings_table.findAll('tr'):\n tds = tr.findAll('td')\n if len(tds) > 1:\n row = []\n for td in tds:\n row.append(str(self._html_unescape(self._recurseUntilString(td))))\n rankings.append(row)\n\n return rankings, False", "def parse_html_tables(html_filename):\n\n with open(html_filename) as f:\n p = SimpleHTMLTableParser()\n p.feed(f.read())\n return p.tables", "def parse_bid_table(table):\r\n columns = table.find_all('td')\r\n player_id = int(re.findall('\\d+', columns[0].a['href'])[0])\r\n player = columns[0].text\r\n owner = columns[1].text\r\n team_id = int(re.findall('\\d+', columns[2].img['src'])[0])\r\n team = table.img['alt']\r\n price = int(columns[3].text.replace(\".\", \"\"))\r\n bid_date = columns[4].text\r\n trans_date = columns[5].text\r\n status = columns[6].text\r\n return player_id, player, owner, team_id, team, price, bid_date, trans_date, status", "def extract_main_table_from_html(html):\n soup = bs(html, 'html.parser')\n table = soup.find('table')\n return(table)", "def _extract_raw_table(self, expr):\n str_start = \"<table\"\n str_end = \"/table>\"\n\n ind_start = expr.find(str_start)\n assert ind_start >= 0\n\n ind_end = expr.find(str_end)\n assert ind_end >= 0\n\n return expr[ind_start: ind_end + len(str_end)]", "def parse_table_file(file):\n\n rows = [row for row in csv.reader(file.decode().splitlines(), delimiter=\",\",\n doublequote=True, escapechar=None, quotechar='\"',\n quoting=csv.QUOTE_MINIMAL, 
skipinitialspace=True)]\n\n if len(rows) < 2:\n raise Exception(\"File must contain at least two rows.\")\n\n # get header\n attributes = rows[0]\n\n # go through the csv by row\n data = []\n for row in rows[1:]:\n data.append(row)\n\n if len(attributes) < 1:\n raise Exception(\"File must contain at least one column.\")\n\n return attributes, data", "def _parse_row(self, row):\n cols = None\n try:\n cols = row.find_all('td')\n cols = self._reformat_col_value(cols)\n cols = [ele.text.strip() for ele in cols]\n except IndexError:\n # Two cases will break the above: games that haven't happened yet,\n # and BR's redundant mid-table headers if future games, grab the\n # scheduling info. Otherwise do nothing.\n if len(cols) > 1:\n cols = [ele.text.strip() for ele in cols][0:5]\n return cols", "def parse_single_table(source, **kwargs):\n if kwargs.get(\"table_number\") is None:\n kwargs[\"table_number\"] = 0\n\n votable = parse(source, **kwargs)\n\n return votable.get_first_table()", "def parse_table(s, allow_wrap=False):\n r = []\n columns = []\n for l in s.splitlines():\n if not l.strip():\n columns = []\n continue\n if rx_header_start.match(l):\n # Column delimiters found. try to determine column's width\n columns = []\n x = 0\n while l:\n match = rx_col.match(l)\n if not match:\n break\n columns.append((x + len(match.group(1)),\n x + len(match.group(1)) + len(\n match.group(2))))\n x += match.end()\n l = l[match.end():]\n elif columns: # Fetch cells\n if allow_wrap:\n row = [l[f:t] for f, t in columns]\n if row[0].startswith(\" \") and r:\n for i, x in enumerate(row):\n r[-1][i] += x\n else:\n r += [row]\n else:\n r += [[l[f:t].strip() for f, t in columns]]\n if allow_wrap:\n return [[x.strip() for x in row] for row in r]\n else:\n return r", "def identifyTableEntry(line):\n matches = re.findall('<td>', line)\n if len(matches) > 0:\n return True", "def parse(self):\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n url = CostOfLiving.URL.format(self.city)\r\n req = Request(url, headers=hdr)\r\n page = urlopen(req)\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n self.table = soup.find(\"table\", attrs={\"class\": \"data_wide_table\"})", "def parse_table(self, table_name):\n table_offset = self.catalog.get(table_name)\n if not table_offset:\n logging.error(f\"Could not find table {table_name} in DataBase\")\n return\n table_offset = table_offset * self.page_size\n table = self._tables_with_data.get(table_offset)\n if not table:\n table_def = self._table_defs.get(table_offset)\n if table_def:\n table = TableObj(offset=table_offset, val=table_def)\n logging.info(f\"Table {table_name} has no data\")\n else:\n logging.error(f\"Could not find table {table_name} offset {table_offset}\")\n return\n access_table = AccessTable(table, self.version, self.page_size, self._data_pages, self._table_defs)\n return access_table.parse()", "def table(self, doc, level, output):\n output('<table border=\"1\" cellpadding=\"2\">\\n')\n for row in doc.getRows()[0]:\n output(\"<tr>\\n\")\n for column in row.getColumns()[0]:\n str = ('<%s colspan=\"%s\" align=\"%s\" valign=\"%s\">'\n % (column.getType(),\n column.getSpan(),\n column.getAlign(),\n column.getValign()))\n output(str)\n for c in column.getChildNodes():\n getattr(self, self.element_types[c.getNodeName()]\n )(c, level, output)\n output(\"</\" + column.getType() + \">\\n\")\n output(\"</tr>\\n\")\n output(\"</table>\\n\")", "def _peek_table(self): # type: () -> Tuple[bool, str]\n # Save initial state\n idx = self._save_idx()\n marker = self._marker\n\n if 
self._current != \"[\":\n raise self.parse_error(\n InternalParserError, (\"_peek_table() entered on non-bracket character\")\n )\n\n # AoT\n self.inc()\n is_aot = False\n if self._current == \"[\":\n self.inc()\n is_aot = True\n\n self.mark()\n\n while self._current != \"]\" and self.inc():\n table_name = self.extract()\n\n # Restore initial state\n self._restore_idx(*idx)\n self._marker = marker\n\n return is_aot, table_name", "def execute_table_widget_parser_test(filename_root, sorted_table_series=''):\n file_location = get_test_file_path('{0}.html'.format(filename_root))\n with open(file_location, 'r') as fp:\n table_widget_html = fp.read()\n file_location = get_test_file_path('{0}_expected.tsv'.format(filename_root))\n with open(file_location, 'r') as fp:\n table_widget_expected_tsv = fp.read()\n table_widget_parser = TableWidgetParser(table_widget_html, sorted_table_series)\n assert table_widget_parser.convert_to_tsv() == table_widget_expected_tsv", "def get_table(html) -> None:\n\tre_table_class = re.compile('.*2iSP.*') # familiar regex template (str w/ '2iSP')\n\ttable_class = html.find('div', {'class': re_table_class})\n\ttable_lst = re.findall('[А-Я|A-Z][^А-Я|A-Z]*', table_class.text) # regex for capitals\n\n\tfor param in table_lst:\n\t\tif 'Осадки' in param:\n\t\t\tweather_dict['precipation'] = re.search(r'\\d+', param).group()\n\t\telif 'Ветер' in param:\n\t\t\tweather_dict['wind'] = re.search(r'\\d+', param).group()\n\t\telif 'Давление' in param:\n\t\t\tweather_dict['pressure'] = re.search(r'\\d+', param).group()\n\t\telif 'Восход' in param:\n\t\t\tweather_dict['sunrise'] = ':'.join(re.findall(r'\\d+', param))\n\t\telif 'Закат' in param:\n\t\t\tweather_dict['sunset'] = ':'.join(re.findall(r'\\d+', param))", "def scrape_table_data(url):\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'html.parser')\n\n return soup.select('table.wikitable.sortable td')", "def parse(self):\n for index in range(len(self.columns)):\n if index in self.columns:\n self.parsed_table[self.columns[index].col_name_str] = []\n if not self.table.linked_pages:\n return self.create_empty_table()\n for data_chunk in self.table.linked_pages:\n original_data = data_chunk\n parsed_data = parse_data_page_header(original_data, version=self.version)\n\n last_offset = None\n for rec_offset in parsed_data.record_offsets:\n # Deleted row - Just skip it\n if rec_offset & 0x8000:\n last_offset = rec_offset & 0xfff\n continue\n # Overflow page\n if rec_offset & 0x4000:\n # overflow ptr is 4 bits flags, 12 bits ptr\n rec_ptr_offset = rec_offset & 0xfff\n # update last pointer to pointer without flags\n last_offset = rec_ptr_offset\n # The ptr is the offset in the current data page. 
we get a 4 byte record_pointer from that\n overflow_rec_ptr = original_data[rec_ptr_offset:rec_ptr_offset + 4]\n overflow_rec_ptr = struct.unpack(\"<I\", overflow_rec_ptr)[0]\n record = self._get_overflow_record(overflow_rec_ptr)\n if record:\n self._parse_row(record)\n continue\n # First record is actually the last one - from offset until the end of the data\n if not last_offset:\n record = original_data[rec_offset:]\n else:\n record = original_data[rec_offset:last_offset]\n last_offset = rec_offset\n if record:\n self._parse_row(record)\n return self.parsed_table", "def parse_from_HTML(row):\n cols = row.find_all('td')\n if len(cols) < 1:\n raise ValueError('The row {} is empty.'.format(row))\n # We check for a faction regardless of the number of columns in the row.\n text = cols[0].string.strip()\n faction = LogItem._get_faction(text)\n events = None\n if len(cols) == 3:\n # There are three columns in the row, so this action did have an effect on the game state.\n events = LogItem._compute_events(cols[1], cols[2])\n return LogItem(text, faction, events)", "def parse_table(soup, start_gen, end_gen):\n pokes = []\n for cell in soup.find_all(\"td\", attrs={'style': None}):\n for name in cell.find_all(\"a\"):\n pokes.append(name.string)\n\n start_index = pokes.index(GEN_STARTS_WITH[start_gen])\n end_index = pokes.index(GEN_ENDS_WITH[end_gen]) + 1\n\n # Doesn't have to be ordered, just personal preference.\n unique_list = OrderedSet(pokes[start_index:end_index])\n\n if start_gen != end_gen:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} to {end_gen} were fetched.\")\n else:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} were fetched.\")\n\n pkmn_string = ', '.join(unique_list)\n\n for key, value in NIDORAN_CASE.items():\n # Handling of Nidoran male/female symbols.\n pkmn_string = pkmn_string.replace(key, value)\n\n return pkmn_string", "def _parse_html_status(table):\n dfs = pandas.read_html(str(table))\n df = dfs[0]\n # moodle pads the end of the table with empty rows\n df = df.dropna(how='all', axis=0)\n\n cols = list(df.columns)\n\n mapping = {\n '.*First name.*Surname.*': 'Name',\n 'Username.*': 'Username',\n 'Status.*': 'Status',\n 'Grade.*': 'Grade',\n }\n\n for oldname, newname in mapping.items():\n for i, colname in enumerate(cols):\n cols[i] = re.sub(oldname, newname, colname)\n\n df.columns = cols\n df = df.set_index('Username')\n\n return df", "def parse_results(table):\n results = []\n\n # FILL IN THE BLANK: Read each row from the table and append it to\n # `results` as a list. 
Be sure to also get the `href` value of the link\n # to the profile and include as the last element of each result list.\n\n return results", "def get_table_row_values(self):\n tag_items = self.soup.find_all(\"tr\")\n table_rows = []\n for tag_item in tag_items:\n tag_child_item_values = tag_item.find_all(\"td\")\n tag_item_child_values = []\n for tag_child_item_value in tag_child_item_values:\n tag_item_child_values.append(tag_child_item_value.text.strip())\n table_rows.append(tag_item_child_values)\n return table_rows", "def get_table_data(table):\n pattern_body = re.compile(r'(?ims)\\<tbody\\>(.*?)\\</tbody\\>')\n pattern_rows = re.compile(r'(?ims)\\<tr\\>(.*?)\\</tr\\>')\n pattern_cols = re.compile(r'(?ims)\\<td.*?\\>([^<]+?)\\<.*?/td\\>')\n\n body = pattern_body.findall(table)[0]\n return [\n list(map(lambda x: html.unescape(x), pattern_cols.findall(row)[:3]))\n for row in pattern_rows.findall(body)]", "def table_parsing(self):\n table_count=0\n if self.table: \n for tebil in self.table:\n json_list=[]\n try:\n table_caption = wtp.parse(str(tebil)).tables[0].caption\n table_folder_name=remove_markup(str(table_caption))\n table_folder_name=table_folder_name.lower()\n table_folder_name=table_folder_name.strip()\n except Exception as e:\n print('Exception: table folder name or out of list in table', str(e))\n continue \n if table_caption:\n try:\n self.revision_page_folder_path=os.path.join(self.rd_folder_path_table,self.page_folder)\n if not os.path.exists(self.revision_page_folder_path):\n os.mkdir(self.revision_page_folder_path)\n table_folder_name=table_folder_name.strip('\\n')\n revision_table_folder_path=os.path.join(self.revision_page_folder_path,table_folder_name)\n revision_table_folder_path=revision_table_folder_path.strip()\n if not os.path.exists(revision_table_folder_path):\n os.mkdir(revision_table_folder_path)\n except Exception as e:\n print('Exception: revision table folder', str(e))\n continue\n table_count=table_count+1\n json_list.append(str(tebil))\n json.dump(json_list, open(os.path.join(revision_table_folder_path, self.revision_id_parent + '_' + self.revision_id_current + \".json\"), \"w\"))\n print('Table caption: ', table_folder_name)\n table_count=table_count+1 \n return table_count", "def html_table_to_xmltree(html):\n node = et.fromstring(re.sub(r'>\\s+<', '><', html.strip()))\n xml = html_table_to_xmltree_sub(node)\n return XMLTree(xml)", "def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]", "def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text", "def test_parse_taxa_summary_table(self):\r\n actual = parse_taxa_summary_table(self.taxa_summary1.split('\\n'))\r\n self.assertItemsEqual(actual[0], self.taxa_summary1_expected[0])\r\n self.assertItemsEqual(actual[1], self.taxa_summary1_expected[1])\r\n assert_almost_equal(actual[2], self.taxa_summary1_expected[2])", "def add_table(self,table):\n \n attr_list = []\n name = None\n comment = \"\"\n new_table = None\n t_id = table.attrib['id']\n \n \n #first cycles the child elements to find table name\n #and creates a table object\n for child in table:\n #table name\n if child.attrib['name'] == 'name':\n name = 
super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n elif child.attrib['name'] == 'comment':\n comment = super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n \n #check if table name found and table created\n if not name == None:\n new_table = db_table.Table(name,t_id,comment)\n \n else: ###\n self.err.print_error(\"dia:table_name_missing\") ###\n e_code = self.err.exit_code[\"xml\"] ###\n ###\n exit(e_code) ###\n \n \n #then cycles again to find the other relevant child elements\n for child in table:\n \n if child.attrib['name'] == 'attributes': \n new_root = child\n \n for child in new_root:\n new_attr = self.parse_attribute(child,new_table)\n attr_list.append(new_attr)\n \n if new_attr.p_key_flag:\n new_table.p_key.append(new_attr)\n \n new_table.attr_list = attr_list\n self.table_dict[t_id] = new_table\n \n return", "def process_table(soup):\n rests = []\n rows = soup.find('table',attrs={\"id\" : \"rest_list\"}).find_all('tr')\n for r in rows[1:len(rows)]:\n rdict = rest_row(r)\n if rdict:\n rests.append(rdict)\n return rests", "def depart_table(self, node: docutils.nodes.Node) -> None:\n pass", "def _parse_doc(self, html_doc):\n\n c = itertools.count(start=1)\n\n # load adp page and parse out stats table\n doc = html.fromstring(html_doc)\n rows = doc.xpath(\n '//table[@id=\"adp_table\"]/tr[contains(@class, \"contentrow\")]')\n\n for row in rows:\n text = [v.strip() for v in row.xpath('.//text()')[1:]]\n text.insert(0, c.next())\n yield text", "def _table_data_text(table):\n\n def row_get_data_text(tr, coltag=\"td\"): # td (data) or th (header)\n return [td.get_text(strip=True) for td in tr.find_all(coltag)]\n\n rows = []\n trs = table.find_all(\"tr\")\n header_row = row_get_data_text(trs[0], \"th\")\n if header_row: # if there is a header row include first\n rows.append(header_row)\n trs = trs[1:]\n for tr in trs: # for every other table rows\n rows.append(row_get_data_text(tr, \"td\")) # data row\n\n return rows", "def html_table_to_xmltree_sub(node):\n # Split text into Token nodes\n # NOTE: very basic token splitting here... 
(to run through CoreNLP?)\n if node.text is not None:\n for tok in re.split(r'\\s+', node.text):\n node.append(et.Element('token', attrib={'word':tok}))\n \n # Recursively append children\n for c in node:\n node.append(html_table_to_xmltree_sub(c))\n return node", "def transform_single_table_def(self, node: Tree) -> Table:\n assert node.data == 'table' and len(node.children) == 2\n\n table_name: Token = node.children[0]\n fields_node: Tree = node.children[1]\n\n # Check that all field names are unique\n collected = set()\n for field_name in fields_node.children:\n if field_name in collected:\n raise SyntaxError(\n f\"duplicated field name {str(field_name)!r} \"\n f\"at line {field_name.line} column {field_name.column}\",\n )\n collected.add(field_name)\n\n return Table(table_name, tuple(fields_node.children))", "def _get_all_table_elements(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n cell_text = cell.text\n if \"VIEW\" in cell_text:\n yield (cell.get_attribute(\"href\"), cell_text)\n else:\n yield cell_text", "def _parse_result_page(self, page):\n items = []\n table = list(page.findall(\".//table[@id='browse']\"))[0]\n for row in (x for x in list(table.findall('tr'))[1:]\n if len(x.getchildren()) != 1):\n item = self._parse_item_row(row)\n items.append(item)\n return items", "def visit_table(self, node: docutils.nodes.reference) -> None:\n self.entry = {}\n self.header = {}", "def get_table_rows(soup):\n tbody = soup.find('tbody')\n return [tr.find_all('td') for tr in tbody.find_all('tr')]", "def parse(t):\n return t", "def parse(html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n table = soup.find('div', class_=\"container-fluid cols_table show_visited\")\n# print table.prettify().encode('UTF-8')\n jobstats = []\n\n for row in table:\n jobstats.append({\n \"title\":row.find('div', class_=\"col-sm-7\").a.text,\n \"category\":row.find('div', class_=\"text-muted\").a.text,\n \"price\":row.find('div', class_=\"col-sm-2 amount title\").\n text.strip(),\n \"applications\":row.find(\n 'div', class_=\"col-sm-3 text-right text-nowrap hidden-xs\"\n ).text.strip()\n })\n return jobstats", "def table_parser(table_files, study, outdir, timepoint=None, dtype=\"wide\",\n auto_type=False):\n # Welcome\n print(\"Starting tables parsing...\")\n\n # Check inputs\n if dtype not in (\"wide\", \"long\"):\n raise ValueError(\"Unexpected data type '{0}'.\".format(dtype))\n\n # Parse all the tables\n tables = []\n with progressbar.ProgressBar(max_value=len(table_files),\n redirect_stdout=True) as bar:\n for cnt, path in enumerate(table_files):\n\n # Open the TSV table\n with open(path, \"rt\") as open_file:\n raw_table = open_file.readlines()\n header = raw_table[0].rstrip(\"\\n\").split(\"\\t\")\n table_content = []\n for row in raw_table[1:]:\n row = row.rstrip(\"\\n\").split(\"\\t\")\n if auto_type:\n raise NotImplementedError(\n \"The automatic typing of columns has not been yet \"\n \"implemented.\")\n table_content.append(row)\n\n # Generate the final structure\n table = {}\n qname = os.path.basename(path).replace(\".tsv\", \"\")\n center = DEFAULT_CENTER\n if timepoint is None:\n timepoint = DEFAULT_TIMEPOINT\n for row_cnt, row in enumerate(table_content):\n assessment_id = \"{0}_q{1}_{2}\".format(\n study.lower(), qname, timepoint)\n subject = row[0].replace(\"sub-\", \"\")\n if dtype == \"wide\":\n assessment_id = \"{0}_{1}\".format(\n assessment_id, row_cnt + 1)\n assessment_id = 
\"{0}_{1}\".format(assessment_id, subject)\n\n # Create assessment structure\n assessment_struct = {\n \"identifier\": assessment_id,\n \"timepoint\": timepoint}\n\n # Build the subject questionnaires structure for this timepoint\n subj_questionnaires = {\n \"Questionnaires\": OrderedDict(),\n \"Assessment\": assessment_struct\n }\n\n # Fill the questionnaire structure\n qdata = OrderedDict()\n for question, answer in zip(header, row):\n question = question.decode(\"utf-8\", \"ignore\").encode(\n \"utf-8\")\n answer = answer.decode(\"utf-8\", \"ignore\").encode(\"utf-8\")\n qdata[question] = answer\n subj_questionnaires[\"Questionnaires\"][qname] = qdata\n\n # Add this questionnaire to the patient data\n if center not in table:\n table[center] = {}\n if subject not in table[center]:\n table[center][subject] = []\n table[center][subject].append(subj_questionnaires)\n\n # Saving result\n save_parsing(table, outdir, study, \"tables-{0}\".format(qname))\n tables.extend(glob.glob(\n os.path.join(outdir, \"tables-{0}*.json\".format(qname))))\n\n # Update progress bar\n bar.update(cnt)\n\n # Goodbye\n print(\"Done.\")\n\n return tables", "def _get_table_val(val):\n text = val.text.strip()\n if val.br:\n val = \", \".join(text.split('\\r\\n'))\n elif val.sup:\n val = \"\".join(map(str, val.contents))\n elif NON_BREAK_SPACE in text:\n val = \", \".join(text.split(f' {NON_BREAK_SPACE} {NON_BREAK_SPACE} '))\n else:\n val = text\n\n return val", "def _parse_row(self, record):\n original_record = record\n reverse_record = record[::-1]\n # Records contain null bitmaps for columns. The number of bitmaps is the number of columns / 8 rounded up\n null_table_len = (self.table_header.column_count + 7) // 8\n if null_table_len and null_table_len < len(original_record):\n null_table = record[-null_table_len:]\n # Turn bitmap to a list of True False values\n null_table = [((null_table[i // 8]) & (1 << (i % 8))) != 0 for i in range(len(null_table) * 8)]\n else:\n logging.error(f\"Failed to parse null table column count {self.table_header.column_count}\")\n return\n if self.version > 3:\n field_count = struct.unpack_from(\"h\", record)[0]\n record = record[2:]\n else:\n field_count = struct.unpack_from(\"b\", record)[0]\n record = record[1:]\n\n relative_records_column_map = {}\n # Iterate columns\n for i, column in self.columns.items():\n # Fixed length columns are handled before variable length. 
If this is a variable length column add it to\n # mapping and continue\n if not column.column_flags.fixed_length:\n relative_records_column_map[i] = column\n continue\n\n self._parse_fixed_length_data(record, column, null_table)\n if relative_records_column_map:\n relative_records_column_map = dict(sorted(relative_records_column_map.items()))\n metadata = self._parse_dynamic_length_records_metadata(reverse_record, original_record,\n null_table_len)\n if not metadata:\n return\n self._parse_dynamic_length_data(original_record, metadata, relative_records_column_map)", "def appendToHTMLTable(tableData,table):\n for rowData in tableData:\n row = html.Tr()\n for cellData in rowData:\n if isinstance(cellData,datetime.datetime):\n # use ISO format but drop the time zone offset\n row.append(html.Td(cellData.isoformat()[:-6]))\n elif cellData is types.InvalidValue:\n row.append(html.Td(className='invalid',title='Invalid value'))\n else:\n row.append(html.Td(str(cellData)))\n row['className'] = 'datarow'\n table.append(row)\n return table", "def test_parse_classic_otu_table_floats_in_table(self):\r\n\r\n data = self.otu_table1_floats\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111.0, 44536.0, 42.0], [1216.0, 3500.0, 6.0],\r\n [1803.0, 1184.0, 2.0], [1722.1, 4903.2, 17.0],\r\n [589.6, 2074.4, 34.5]]),\r\n self.expected_lineages1)\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])", "def parseElement(self, line):\n\n\t\tcols = string.split(line, \",\")\n\n\t\tfor col in cols:\n\t\t\tself.element.append(col)", "def parse_row(row, download_to=\"../data/raw/\"):\n\n row_template = utils.get_row_template(row)\n\n url = row_template[\"link\"]\n link_format = url.split(\".\")[-1].lower()\n file_name = url.split(\"/\")[-1]\n local_file_path = download_to + file_name\n\n if not os.path.exists(local_file_path):\n utils.download_file(url, local_file_path)\n\n if link_format == \"csv\":\n table_df = csv_cleaner.try_to_parse_csv(local_file_path)\n elif link_format == \"pdf\":\n table_df = pdf_cleaner.try_parse_pdf(local_file_path)\n elif link_format == \"odt\":\n table_df = odt_cleaner.try_parse_odt(local_file_path)\n elif link_format == \"doc\":\n table_df = doc_cleaner.try_parse_doc(local_file_path)\n elif link_format == \"xlsx\":\n table_df = xlxs_cleaner.try_parse_xlsx(local_file_path)\n elif link_format == \"ods\":\n table_df = ods_cleaner.try_parse_ods(local_file_path)\n else:\n raise Exception(\"Not sure how to parse {}...\".format(local_file_path))\n\n if table_df is None:\n return None\n\n table_df[\"department\"] = row_template[\"department\"]\n table_df[\"period\"] = row_template[\"period\"]\n table_df[\"link\"] = row_template[\"link\"]\n\n return table_df", "def fill_table(info):\n # extrac attributes from info struct\n data = info[\"data\"]\n table = info[\"table\"]\n header = info[\"header\"]\n row_num = info[\"row_num\"]\n\n currency_type_num = row_num - 1\n row_index = 0\n col_index = 0\n i = 0\n while i < len(data):\n if data[i].find(\"%\") > 0:\n # stat data\n while i < len(data) and row_index < currency_type_num:\n table[row_index+1].append(data[i])\n row_index += 1\n i += 1\n # Reset row_index\n row_index = 0\n else:\n if i < row_num - 1:\n # currency Type\n table[i+1].append(data[i])\n else:\n # time marker\n if data[i] != header:\n table[0].append(data[i])\n i += 1\n\n # End loop\n 
return None", "def parse_tables_from_html(html, md_file):\n soup = BeautifulSoup(html, features=\"lxml\")\n table_contents = \"\"\n for table in soup.select('table'):\n try:\n table_content = process_table(table)\n table_contents += table_content\n except:\n continue\n\n if not table_contents:\n print(\"NO VALID TABLE\")\n return\n\n # write to the file\n with codecs.open(md_file, mode='w', encoding='utf-8') as file:\n file.write(table_contents)\n print(\"The Table is saved in\" + md_file)", "def write_table(self, tab):\n self.save_text()\n\n table = list()\n row = list()\n headers = tab['c'][3]\n if headers:\n has_content = False\n for col in headers:\n self.list_parse(col, cell_content=True)\n cell_content = self.get_content()\n row.append(cell_content)\n if cell_content != '':\n has_content = True\n if has_content:\n row = tuple(row)\n table.append(row)\n t_content = tab['c'][4]\n for line in t_content:\n row = list()\n for col in line:\n self.list_parse(col, cell_content=True)\n cell_content = self.get_content()\n row.append(cell_content)\n row = tuple(row)\n table.append(row)\n table = tuple(table)\n self.tables.append((table, (self.context, self.ancestor)))", "def get_player_stats(table_with_players):\n\n for element in table_with_players.tbody:\n if isinstance(element, bs4.element.Tag):\n one_player_stats = element.find_all('td')\n yield one_player_stats\n yield 'generator empty'", "def parse(self):\n \n root = self.xml_tree.getroot()\n \n #run for creating tables\n for child in root[1]:\n if child.attrib['type'] == 'Database - Table':\n self.add_table(child)\n \n \n #if table_dict empty -> wrong type of dia diagram\n if self.table_dict == {}: ###\n self.err.print_error(\"parser:database_wrong_dia\") ###\n e_code = self.err.exit_code[\"parser\"] ###\n ###\n exit(e_code) ###\n \n \n #run for adding references\n for child in root[1]:\n if child.attrib['type'] == 'Database - Reference':\n self.add_reference(child)\n \n return", "def scrapeTable():\n\tfrom bs4 import BeautifulSoup\n\tfrom urllib2 import urlopen\n\n\turl = \"https://en.wikipedia.org/wiki/List_of_the_largest_libraries_in_the_United_States\"\n\n\t# read the html content\n\thtml = urlopen(url).read()\n\n\t# create BeautifulSoup from html\n\ttable = BeautifulSoup(html)\n\n\t# find all table row elements\n\trows = table.findAll('tr')\n\n\tarr = []\n\tfor tr in rows:\n\n\t\t# find all columns\n\t\tcols = tr.findAll('td')\n\n\t\t# column text\n\t\tx = [c.text for c in cols]\n\n\t\t# filter the content\n\t\tif len(x)!=0:\n\t\t\ttry:\n\t\t\t\tint(x[0])\n\t\t\texcept Exception, e:\n\t\t\t\tbreak\n\n\t\t\tarr.append(x)\n\n\treturn arr", "def tabular_parser(path: str, header: bool = True) -> TabularData:\n with open(path, \"r\") as read_obj:\n csv_reader = reader(read_obj)\n list_of_rows = list(csv_reader)\n rows = np.array(list_of_rows)\n\n if header:\n return TabularData(column_names=rows[0, :], data=rows[1:, :])\n else:\n return TabularData(column_names=None, data=rows[1:, :])", "def parse_table_data(json_data):\n data = [row for row in json_data[\"aaData\"]]\n\n data = [\n {\n \"status\": row[0],\n \"nature\": row[1],\n \"unavailabilityInterval\": row[2].replace(\"&nbsp;\", \" \"),\n \"inArea\": row[3],\n \"outArea\": row[4],\n \"newNTC\": BeautifulSoup(row[5], \"lxml\").text,\n \"detailId\": row[6],\n }\n for row in data\n ]\n\n for row in data:\n interval = row.pop(\"unavailabilityInterval\")\n start_date, end_date = EntsoeAPI.parse_unavailability_interval(\n interval\n )\n\n row.update(\n {\n \"unavailabilityStart\": 
start_date,\n \"unavailabilityEnd\": end_date,\n }\n )\n\n for row in data:\n \"\"\"\n Decode Outage Status \n -----------------\n A05: Active\n A09: Cancelled\n A13: Withdrawn\n \"\"\"\n if \"A05\" in row[\"status\"]:\n row[\"status\"] = \"Active\"\n elif \"A09\" in row[\"status\"]:\n row[\"status\"] = \"Cancelled\"\n elif \"A13\" in row[\"status\"]:\n row[\"status\"] = \"Withdrawn\"\n\n \"\"\"\n Decode Outage Type \n -----------------\n A54: Forced\n A53: Planned\n \"\"\"\n if \"A53\" in row[\"nature\"]:\n row[\"nature\"] = \"Planned\"\n elif \"A54\" in row[\"nature\"]:\n row[\"nature\"] = \"Forced\"\n return data", "def get_table_by_id(soup, id):\n # dont include .tbody after the find() for some reason\n html_table = soup.find(id=id)\n if html_table is None:\n return None\n rows = html_table.find_all('tr')[1:]\n return [row.contents for row in rows]", "def importXmlToTable(table):\n request = \"DELETE FROM %s\" % table\n execute(request)\n elms = doc.getElementsByTagName(\"text\")\n for el in elms:\n data = parseTextElm(el)\n if data == None: continue\n textstring, audiouri, xmlid, textflag, audioflag = data\n if el.parentNode.tagName == \"accelerator\":\n keys = \"\\\"%s\\\"\" % el.parentNode.getAttribute(\"keys\")\n else:\n keys=\"NULL\"\n \n request = \"\"\"INSERT INTO %(table)s (textstring, textflag, audioflag, audiouri, xmlid, actualkeys) \n VALUES (\\\"%(textstring)s\\\", %(textflag)d, %(audioflag)d, \\\"%(audiouri)s\\\", \\\"%(xmlid)s\\\", %(keys)s)\"\"\" \\\n % {\"table\": table, \"textstring\": textstring, \"textflag\": textflag, \"audioflag\": audioflag, \"audiouri\": audiouri, \n \"xmlid\": xmlid, \"keys\": keys}\n execute(request)\n \n setRoles(table)\n findMnemonicGroups(table)\n findAcceleratorTargets(table)\n return", "def _parse_value_label_table(self, sfile):\n byteorder = self._byteorder\n \n nentries = unpack(byteorder + 'l', sfile.read(4))[0]\n txtlen = unpack(byteorder + 'l', sfile.read(4))[0]\n off = []\n val = []\n txt = []\n for i in range(nentries):\n off.append(unpack(byteorder+'l',sfile.read(4))[0])\n for i in range(nentries):\n val.append(unpack(byteorder+'l',sfile.read(4))[0])\n \n txt_block = unpack(str(txtlen) + \"s\", sfile.read(txtlen))\n txt = [t.decode('iso-8859-1') \n for b in txt_block for t in b.split(b'\\0')]\n \n # put (off, val) pairs in same order as txt\n sorter = list(zip(off, val))\n sorter.sort()\n \n # dict of val[i]:txt[i]\n table = {sorter[i][1]: txt[i] for i in range(len(sorter))}\n \n return table", "def load_xml(self,filename):\n self.initvars()\n source = iter(ET.iterparse(filename, events = ('start','end')))\n self.name = source.next()[1].tag\n for event,elem in source:\n if event == 'end' and elem.tag == 'row':\n row = [None]*self.numcols()\n for name,val in elem.attrib.items():\n try:\n idx = self.getColIndex(name)\n except ColumnNotFoundError:\n idx = len(self.cols)\n row.append(None)\n # Add new column to the table\n self.cols.append(set([name]))\n for oldrow in self.data:\n oldrow.append(None)\n row[idx] = val\n self.data.append(row)\n self.initTypes()", "def find_table(self):\n tables = self.document.tables\n header = []\n for table in tables:\n for row in table.rows:\n header[:] = []\n for cell in row.cells:\n for para in cell.paragraphs:\n header.append(para.text.strip(' '))\n # new versions of final CAPA's keep project information in a table\n if 'Project Information' in header:\n self.read_new_format(table)\n # check if elements in findings is also in header\n cond = len(header) == 5 and header[4] == 'Rating'\n if cond or 
[x for x in self.findings for y in header if x in y] == self.findings:\n self.table = table\n return", "def parse_table_from_output(output, policy_name):\n\n headers = []\n row = []\n\n separator = \"|\"\n\n for line in output.split(\"\\n\"):\n if not headers and line.startswith(\"Name\"):\n headers = [header.strip() for header in line.split(separator)]\n elif line.startswith(policy_name):\n if row:\n raise Exception(\"Multiple license policies detected - expected 1.\")\n row = [val.strip() for val in line.split(separator)]\n\n if not headers:\n raise Exception(\"Table not found in output!\")\n\n if not row:\n raise Exception(\"No license policies found - expected 1.\")\n\n return dict(zip(headers, row))", "def _unpack(row,kind=\"td\"):\n elts=row.findall(\".//%s\" % kind)\n elts=[s.text_content() for s in elts]\n elts=[\" \".join(\n (s.replace(\"\\n\",\"\")\n # .replace(\"\\ue002\",\"\")\n # .replace(\"\\ue004\",\"\")\n .encode(\"ascii\",\"ignore\")\n ).strip().split())\n for s in elts]\n return elts", "def _parse_entry_table(self,obj,padding=0):\n data = ''\n\n for item in obj.data:\n line = self.parse(item,padding)\n tokens = line.split('\\n')\n tokens[0] = '%s [0x%x] {' % (tokens[0][:-1],int(item.offset))\n data += '\\n'.join(tokens) + '\\n'\n\n return data", "def parse_row(self, response, row):\n raise NotImplementedError", "def get_data_from_row(x):\n\n import re\n\n data = []\n x = re.findall(\"<(TD|td)[^>*]+>(.*?)(</(td|TD)>)\", x, re.S)\n for rec in [tmp[1] for tmp in x]:\n if len(rec) == 0:\n data.append(None)\n continue\n # If no value\n if re.match(\"[-]+\", rec):\n data.append(None)\n continue\n # If there is a special font: remove font\n tmp = re.findall(\"<font[^>*]+>(.*?)</font>\", rec, re.S)\n if tmp:\n data.append(tmp[0])\n continue\n # If this is an image cells: extract weather type\n tmp = re.findall(\"<img (.*?)alt='([^('.)*]+).*\", rec, re.S)\n if tmp:\n data.append(tmp[0][1])\n continue\n # Date cell?\n tmp = re.findall(\".*([0-9]{2}/[0-9]{2}/[0-9]{4}).*\", rec, re.S)\n if tmp:\n data.append(tmp[0])\n continue\n # Time cell?\n tmp = re.findall(\".*([0-9]{2}:[0-0]{2}).*\", rec, re.S)\n if tmp:\n data.append(tmp[0])\n continue\n\n raise Exception(\"unknown handling of \\\"{:s}\\\"\".format(rec))\n\n # We expect the first two elements to contain date/time information.\n # convert to one.\n from datetime import datetime as dt\n data = [dt.strptime(\"{:s} {:s}\".format(data[0], data[1]), \"%m/%d/%Y %H:%M\")] + data[2:]\n\n return data", "def verify_table(scope=driver):\n try:\n table = scope.find_element_by_tag_name('table')\n entries = table.find_elements_by_class_name('m-datatable__row--even')\n if len(entries) > 0:\n return \"Success\"\n else:\n return \"Error: No table entries\"\n except Exception as e:\n return \"Error: \" + str(e)", "def from_table_row(row):\n result = None\n\n cells = row.find_elements_by_css_selector(\"td\")\n if len(cells) > 0:\n result = MeterElement()\n checkbox_cell = cells[0]\n result.checkbox = checkbox_cell.find_element_by_css_selector(\n 'input[type=\"checkbox\"]'\n )\n\n data_cells = cells[1:]\n result.account_id = data_cells[0].text\n result.description = data_cells[1].text\n result.meter_id = result.description.split()[-1]\n result.min_date = dateparser.parse(data_cells[2].text).date()\n result.max_date = dateparser.parse(data_cells[3].text).date()\n return result", "def read_table(filename, separator=',', dtype='float'):\n\n fp = open(filename, 'r')\n\n headers = fp.readline()\n\n # print \"headers = \", headers\n headers = [h.strip() 
for h in headers.split(separator)]\n headers.remove('')\n\n #print \"headers = \", headers\n\n columns = [[] for h in headers]\n #table = dict.fromkeys(headers, [])\n\n #table = Container.fromkeys(headers, [])\n\n #print \"table = \", table\n\n for line in fp.readlines():\n\n values = [h.strip() for h in line.split(separator)]\n values.remove('')\n\n #print \"values = \", values\n\n for k, v in enumerate(values):\n\n #print k, \" = \", v\n\n\n if dtype == \"float\":\n v = float(v)\n\n columns[k].append(v)\n #table[k].append(v)\n\n table = Container(**dict(list(zip(headers, columns))))\n table.headers = headers\n\n return table", "def isTable(line):\n # Make the string easier to parse.\n line_stripped = lineNormalise(line)\n\n # Return value.\n ret = False\n\n # If the line starts with the word table, we have a table definition!\n if line_stripped.startswith('table'):\n ret = True\n\n # Tell the horrible truth that this code could not find a table.\n return ret", "def process_soup(soup):\n if soup == None:\n return\n\n games = soup.findAll('tr')\n\n stats = []\n for gamerow in games:\n cells = [i for i in gamerow.findChildren('td')]\n if len(cells) == 0:\n continue\n if cells[0].string == \"G\":\n continue\n \n gamenum = cells[0].string\n \n try:\n gamedate = cells[1].firstText().string\n except:\n gamedate = cells[1].string\n\n home = cells[2].string != \"@\"\n opp = cells[3].get('csk')[:3]\n winloss = cells[4].string\n pf = cells[6].string\n pa = cells[7].string\n wins = cells[8].string\n losses = cells[9].string\n streak = cells[10].string\n notes = cells[11].string\n if notes == None:\n notes = \"\"\n\n stats.append([gamenum, gamedate, home, opp, winloss,\n pf, pa, wins, losses, streak, notes])\n \n return stats", "def process_soup(soup):\n if soup == None:\n return\n\n games = soup.findAll('tr')\n\n stats = []\n for gamerow in games:\n cells = [i for i in gamerow.findChildren('td')]\n if len(cells) == 0:\n continue\n if cells[0].string == \"G\":\n continue\n \n gamenum = cells[0].string\n \n try:\n gamedate = cells[1].firstText().string\n except:\n gamedate = cells[1].string\n\n home = cells[2].string != \"@\"\n opp = cells[3].get('csk')[:3]\n winloss = cells[4].string\n pf = cells[6].string\n pa = cells[7].string\n wins = cells[8].string\n losses = cells[9].string\n streak = cells[10].string\n notes = cells[11].string\n if notes == None:\n notes = \"\"\n\n stats.append([gamenum, gamedate, home, opp, winloss,\n pf, pa, wins, losses, streak, notes])\n \n return stats", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table Title'])\n else:\n 
tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def converttable(tablecode):\n table = etree.XML(tablecode)\n rows = iter(table)\n headers = [col.text for col in next(rows)]\n data = []\n for row in rows:\n values = [col.text for col in row]\n debugprint(dict(zip(headers, values)), \"RAW JSON\")\n data.append(dict(zip(headers, values)))\n return data", "def get_table(new_arr, types, titles):\n try:\n table = agate.Table(new_arr, titles, types)\n return table\n except Exception as e:\n print e", "def start_table(self):\n self.result = \"<table>\\n\"", "def test_parse_classic_otu_table(self):\r\n data = self.otu_table1\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n self.expected_lineages1)\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])\r\n\r\n\r\n # test that the modified parse_classic performs correctly on OTU tables\r\n # without leading comments\r\n data = self.otu_table_without_leading_comment\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n sams = ['let-7i', 'miR-7', 'miR-17n', 'miR-18a', 'miR-19a', 'miR-22',\r\n 'miR-25', 'miR-26a']\r\n otus = ['A2M', 'AAAS', 'AACS', 'AADACL1']\r\n vals = array([\r\n [-0.2, 0.03680505, 0.205, 0.23, 0.66, 0.08, -0.373, 0.26],\r\n [-0.09, -0.25, 0.274, 0.15, 0.12, 0.29, 0.029, -0.1148452],\r\n [0.33, 0.19, 0.27, 0.28, 0.19, 0.25, 0.089, 0.14],\r\n [0.49, -0.92, -0.723, -0.23, 0.08, 0.49, -0.386, -0.64]])\r\n exp = (sams, otus, vals, []) # no lineages\r\n # because float comps in arrays always errors\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n self.assertEqual(obs[3], exp[3])\r\n self.assertTrue(all((obs[2] == exp[2]).tolist()))", "def extract_data(table, start_row=1, end_row=None):\n # Define a list of characters in the table that we want to ignore\n characters = ['(', ')', '*']\n\n datas = []\n skip_header = True\n for i, row in enumerate(table.data[start_row:end_row]):\n if skip_header:\n # Read the first item of each row to determine if we're passed the\n # header and into the table data. 
We know the first row of data\n # begins with a number (or a fraction).\n try:\n convert_to_float(row[0])\n except ValueError:\n continue\n skip_header = False\n data = []\n for j, item in enumerate(row):\n # We don't want new line characters\n if '\\n' in item:\n item = item.split('\\n')[-1]\n # Skip and unwanted characters\n if any(c in item for c in characters):\n continue\n # Manage blank data - there's rogue columns in the read data\n if item == '':\n if j == 0:\n data.append(datas[-1][j])\n else:\n continue\n # If it's a number add to the data, otherwise add a NaN\n else:\n try:\n data.append(\n convert_to_float(item)\n )\n except ValueError:\n data.append(np.nan)\n # Remove and rows of NaNs\n if np.all(np.isnan(np.array(data)[1:])):\n continue\n else:\n datas.append(data)\n return datas", "def process_ipet_table(table, repres, add_ind=False, swap=False):\n # split rowspan cells from the tables body to enable js datatable\n table_rows = [e for e in table.find(\".//tbody\").iter() if e.tag == \"tr\" or e.tag == \"th\"]\n groupcount = 1\n oldtext = \"\"\n for row in table_rows:\n cellcount = 0\n for cell in row.iter():\n if add_ind and cellcount == 1 and cell.tag == \"th\" and cell.text != oldtext:\n cell.text = \"{:0>2d}. {}\".format(groupcount, cell.text)\n oldtext = cell.text\n groupcount = groupcount + 1\n rowspan = cell.get(\"rowspan\")\n if rowspan is not None:\n del cell.attrib[\"rowspan\"]\n nextrow = row\n for i in range(int(rowspan) - 1):\n nextrow = nextrow.getnext()\n newcell = html.fromstring(html.tostring(cell))\n nextrow.insert(cellcount - 1, newcell)\n cellcount = cellcount + 1\n\n # render to string and make the dataTable fit the width\n htmltable = html.tostring(table).decode(\"utf-8\")\n # replace ids and so on\n htmltable = replace_in_str(htmltable, repres)\n htmltable = htmltable.replace(\"nan\", NONE_DISPLAY)\n return htmltable", "def read_opl_text(tdf,text, commaseperator = True):\n verify(stringish(text), \"text needs to be a string\")\n # probably want to verify something about the ticdat factory, look at the wiki\n dict_with_lists = defaultdict(list)\n NONE, TABLE, ROW, ROWSTRING, ROWNUM, FIELD, STRING, NUMBER = 1, 2, 3, 4, 5, 6, 7, 8\n mode = NONE\n field = ''\n table_name = ''\n row = []\n\n def to_number(st, pos):\n try:\n return float(st)\n except ValueError:\n verify(False,\n \"Badly formatted string - Field '%s' is not a valid number. Character position [%s].\" % (st, pos))\n\n for i,c in enumerate(text):\n if mode not in [STRING, ROWSTRING] and (c.isspace() or c == '{' or c == ';'):\n if mode in [NUMBER, ROWNUM, FIELD] and not commaseperator:\n c = ','\n else:\n continue\n if mode in [STRING, ROWSTRING]:\n if c == '\"':\n if text[i-1] == '\\\\':\n field = field[:-1] + '\"'\n else:\n if mode is ROWSTRING:\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\" % table_name)\n dict_with_lists[table_name].append(row)\n row = []\n mode = TABLE\n else:\n mode = FIELD\n else:\n field += c\n elif c == '=':\n verify(mode is NONE, \"Badly formatted string, unrecognized '='. Character position [%s]\"%i)\n verify(len(table_name) > 0, \"Badly formatted string, table name can't be blank. Character position [%s]\"%i)\n verify(table_name not in dict_with_lists.keys(), \"Can't have duplicate table name. [Character position [%s]\"%i)\n dict_with_lists[table_name] = []\n mode = TABLE\n elif c == '<':\n verify(mode is TABLE, \"Badly formatted string, unrecognized '<'. 
Character position [%s]\"%i)\n mode = ROW\n\n elif c == ',':\n verify(mode in [ROW, FIELD, NUMBER, ROWNUM, TABLE], \"Badly formatted string, unrecognized ','. \\\n Character position [%s]\"%i)\n if mode is TABLE:\n continue\n if mode is ROWNUM:\n field = to_number(field,i)\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\" % table_name)\n dict_with_lists[table_name].append(row)\n row = []\n mode = TABLE\n else:\n if mode is NUMBER:\n field = to_number(field,i)\n row.append(field)\n field = ''\n mode = ROW\n\n elif c == '\"':\n verify(mode in [ROW, TABLE], \"Badly formatted string, unrecognized '\\\"'. Character position [%s]\"%i)\n if mode is ROW:\n mode = STRING\n if mode is TABLE:\n mode = ROWSTRING\n\n elif c == '}':\n verify(mode in [TABLE, ROWNUM], \"Badly formatted string, unrecognized '}'. Character position [%s]\"%i)\n if mode is ROWNUM:\n field = to_number(field,i)\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\" % table_name)\n dict_with_lists[table_name].append(row)\n row = []\n table_name = ''\n mode = NONE\n\n elif c == '>':\n verify(mode in [ROW, FIELD, NUMBER], \"Badly formatted string, unrecognized '>'. \\\n Character position [%s]\"%i)\n if mode is NUMBER:\n field = to_number(field,i)\n mode = FIELD\n if mode is FIELD:\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\"%table_name)\n dict_with_lists[table_name].append(row)\n row = []\n mode = TABLE\n else:\n verify(mode in [NONE, ROW, ROWNUM, FIELD, NUMBER], \"Badly formatted string, \\\n unrecognized '%s'. 
Character position [%s]\"%(c,i))\n if mode is NONE:\n table_name += c\n elif mode is TABLE:\n mode = ROWNUM\n field += c\n else:\n mode = NUMBER\n field += c\n assert not find_duplicates_from_dict_ticdat(tdf, dict_with_lists), \\\n \"duplicates were found - if asserts are disabled, duplicate rows will overwrite\"\n\n return tdf.TicDat(**{k.replace(tdf.opl_prepend,\"\",1):v for k,v in dict_with_lists.items()})", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def _parse_rows(rows, header='infer'):\n if not rows:\n raise ValueError('rows={0} is invalid'.format(rows))\n rows = copy.copy(rows)\n label = rows[0][0].replace(' ', '_').lower()\n\n if header == 'infer':\n if len(rows) >= 3:\n if _infer_dtype(rows[1][-1]) != _infer_dtype(rows[2][-1]):\n header = True\n else:\n header = False\n else:\n header = False\n if header is True:\n colnames = rows[1]\n data_idx = 2\n else:\n colnames = None\n data_idx = 1\n\n data_dtypes = [_infer_dtype(val) for val in rows[data_idx]]\n if any(dd == 'pct' for dd in data_dtypes):\n label += '_pct'\n\n parsed_rows = []\n for row in rows[data_idx:]:\n vals = [_convert_val(val, dtype) for val, dtype in zip(row, data_dtypes)]\n if colnames:\n parsed_rows.append({colname:val for colname, val in zip(colnames, vals)})\n else:\n parsed_rows.append(vals)\n\n return label, parsed_rows", "def _read_table(hdulist, extname, **kwargs):\n t = _read_ext(Table, hdulist, extname, **kwargs)\n h = hdulist[extname].header\n for i in range(h['TFIELDS']):\n try:\n t.columns[i].unit = h['TUNIT%d' % (i + 1)]\n except Exception:\n pass\n return t", "def test_parse_classic_otu_table_file(self):\r\n data = \"\"\"#Full OTU Counts\r\n#OTU ID\tFing\tKey\tNA\tConsensus Lineage\r\n0\t19111\t44536\t42\tBacteria; Actinobacteria; Actinobacteridae; Propionibacterineae; Propionibacterium\r\n1\t1216\t3500\t6\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Lactobacillales; Lactobacillales; Streptococcaceae; Streptococcus\r\n2\t1803\t1184\t2\tBacteria; Actinobacteria; Actinobacteridae; Gordoniaceae; Corynebacteriaceae\r\n3\t1722\t4903\t17\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Staphylococcaceae\r\n4\t589\t2074\t34\tBacteria; Cyanobacteria; Chloroplasts; vectors\"\"\"\r\n data_f = StringIO(data)\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n [['Bacteria', 'Actinobacteria', 'Actinobacteridae', 'Propionibacterineae', 'Propionibacterium'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Lactobacillales',\r\n 'Lactobacillales',\r\n 'Streptococcaceae',\r\n 'Streptococcus'],\r\n ['Bacteria',\r\n 'Actinobacteria',\r\n 'Actinobacteridae',\r\n 
'Gordoniaceae',\r\n 'Corynebacteriaceae'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Staphylococcaceae'],\r\n ['Bacteria', 'Cyanobacteria', 'Chloroplasts', 'vectors']])\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])", "def addTableRow(self, t):\r\n\r\n # Exits if no table was started\r\n assert self.tableHeader, \"No table was started\"\r\n\r\n # Retrieves table headers list depending of nesting level\r\n if self.subTableHeader != None:\r\n headers = self.subTableHeader\r\n indent = \" \"\r\n else:\r\n headers = self.tableHeader\r\n indent = \"\"\r\n\r\n # Adds the row in the resulting text\r\n self.text += \"<tr>\"\r\n for s in t:\r\n self.text += \"<td>\" + self.getHTMLText(s) + \"</td>\"\r\n self.text += \"</tr>\\n\"\r\n\r\n # Prints text on standard output using the table headers\r\n if self.verbosity >= 1 :\r\n for i in range(len(t)):\r\n print indent, headers[i], \" : \", t[i]\r\n print \"\"", "def body(self) -> ComponentTableBody:\n res = []\n raw_rows = self.wait_for_elements_by_tag_name('tr')[1:]\n\n for row in raw_rows:\n res.append(ComponentTableRow(row))\n\n return ComponentTableBody(res)", "def parse_sp500_wiki_page(html_page):\n soup = BeautifulSoup(html_page, 'html.parser')\n table = soup.find(\"table\", {\"id\": \"constituents\"})\n \n data = []\n column_names = [col_name.text.strip() for col_name in table.find_all('th')]\n for row in table.find_all('tr'):\n data_row = [col_name.text.strip() for col_name in row.find_all('td')]\n if data_row:\n data.append(data_row)\n return data, column_names" ]
[ "0.6638918", "0.65049", "0.640189", "0.6186907", "0.61552876", "0.5986234", "0.59336215", "0.5882514", "0.5852241", "0.5727533", "0.5713803", "0.57028484", "0.56890404", "0.5629742", "0.5603507", "0.55797344", "0.5575111", "0.55562884", "0.55540824", "0.5548597", "0.5501241", "0.54935247", "0.547061", "0.53886485", "0.5329169", "0.5306047", "0.5301207", "0.5284271", "0.52577853", "0.5255112", "0.5242104", "0.5229749", "0.5227734", "0.5222719", "0.5201888", "0.51819164", "0.5176325", "0.5162325", "0.51562715", "0.5154036", "0.5134055", "0.5115045", "0.5091035", "0.50726277", "0.5072186", "0.50718296", "0.50567555", "0.50455016", "0.5007566", "0.49979675", "0.4980205", "0.49635622", "0.49385613", "0.49380562", "0.4902912", "0.48968494", "0.48961854", "0.48899257", "0.48895094", "0.48857668", "0.48851317", "0.48717102", "0.48669222", "0.48443142", "0.4830624", "0.48180202", "0.48124388", "0.4810751", "0.4793436", "0.47922295", "0.47777233", "0.47761518", "0.47714937", "0.4768775", "0.47651467", "0.47533333", "0.4737187", "0.47335282", "0.4725906", "0.47220498", "0.47147733", "0.47078997", "0.46999452", "0.46999452", "0.46989372", "0.4697192", "0.4693873", "0.46872714", "0.46827486", "0.46727404", "0.46697792", "0.46590683", "0.46546465", "0.46532404", "0.46460527", "0.4644414", "0.4634674", "0.46338934", "0.4632332", "0.46291855" ]
0.6397043
3
Peeks ahead non-intrusively by cloning then restoring the initial state of the parser. Returns the name of the table about to be parsed, as well as whether it is part of an AoT.
def _peek_table(self): # type: () -> Tuple[bool, str] # Save initial state idx = self._save_idx() marker = self._marker if self._current != "[": raise self.parse_error( InternalParserError, ("_peek_table() entered on non-bracket character") ) # AoT self.inc() is_aot = False if self._current == "[": self.inc() is_aot = True self.mark() while self._current != "]" and self.inc(): table_name = self.extract() # Restore initial state self._restore_idx(*idx) self._marker = marker return is_aot, table_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_aot(self, first, name_first): # type: (Item, str) -> Item\n payload = [first]\n self._aot_stack.append(name_first)\n while not self.end():\n is_aot_next, name_next = self._peek_table()\n if is_aot_next and name_next == name_first:\n _, table = self._parse_table()\n payload.append(table)\n else:\n break\n\n self._aot_stack.pop()\n\n return AoT(payload)", "def parse_table(self, table_name):\n table_offset = self.catalog.get(table_name)\n if not table_offset:\n logging.error(f\"Could not find table {table_name} in DataBase\")\n return\n table_offset = table_offset * self.page_size\n table = self._tables_with_data.get(table_offset)\n if not table:\n table_def = self._table_defs.get(table_offset)\n if table_def:\n table = TableObj(offset=table_offset, val=table_def)\n logging.info(f\"Table {table_name} has no data\")\n else:\n logging.error(f\"Could not find table {table_name} offset {table_offset}\")\n return\n access_table = AccessTable(table, self.version, self.page_size, self._data_pages, self._table_defs)\n return access_table.parse()", "def _parse_table(self): # type: (Optional[str]) -> Tuple[Key, Item]\n indent = self.extract()\n self.inc() # Skip opening bracket\n\n is_aot = False\n if self._current == \"[\":\n if not self.inc():\n raise self.parse_error(UnexpectedEofError)\n\n is_aot = True\n\n # Key\n self.mark()\n while self._current != \"]\" and self.inc():\n pass\n\n name = self.extract()\n key = Key(name, sep=\"\")\n\n self.inc() # Skip closing bracket\n if is_aot:\n # TODO: Verify close bracket\n self.inc()\n\n cws, comment, trail = self._parse_comment_trail()\n\n result = Null()\n values = Container()\n\n while not self.end():\n item = self._parse_item()\n if item:\n _key, item = item\n if not self._merge_ws(item, values):\n values.append(_key, item)\n else:\n if self._current == \"[\":\n _, name_next = self._peek_table()\n\n if self._is_child(name, name_next):\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n\n # Picking up any sibling\n while not self.end():\n _, name_next = self._peek_table()\n\n if not self._is_child(name, name_next):\n break\n\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n else:\n table = Table(\n values, Trivia(indent, cws, comment, trail), is_aot\n )\n\n result = table\n if is_aot and (\n not self._aot_stack or name != self._aot_stack[-1]\n ):\n result = self._parse_aot(table, name)\n\n break\n else:\n raise self.parse_error(\n InternalParserError,\n (\"_parse_item() returned None on a non-bracket character.\"),\n )\n\n if isinstance(result, Null):\n result = Table(values, Trivia(indent, cws, comment, trail), is_aot)\n\n return key, result", "def isTable(line):\n # Make the string easier to parse.\n line_stripped = lineNormalise(line)\n\n # Return value.\n ret = False\n\n # If the line starts with the word table, we have a table definition!\n if line_stripped.startswith('table'):\n ret = True\n\n # Tell the horrible truth that this code could not find a table.\n return ret", "def parse_single_table(source, **kwargs):\n if kwargs.get(\"table_number\") is None:\n kwargs[\"table_number\"] = 0\n\n votable = parse(source, **kwargs)\n\n return votable.get_first_table()", "def _extract_raw_table(self, expr):\n str_start = \"<table\"\n str_end = \"/table>\"\n\n ind_start = expr.find(str_start)\n assert ind_start >= 0\n\n ind_end = expr.find(str_end)\n assert ind_end >= 
0\n\n return expr[ind_start: ind_end + len(str_end)]", "def test_peek_none(self):\n nt = NewickTokenizer(newick='(a,(b,c));')\n nt.tokens()\n self.assertIsNone(nt._peek())", "def getTAPTables(self):\n\t\treturn [r[\"tablename\"] for r in\n\t\t\tself.readerConnection.queryToDicts(\n\t\t\t\t\"select tablename from dc.tablemeta where adql\")]", "def _seek_to_table(self, table):\n\n self.stream.seek(self.table_pointers[table])", "def test_rt_table(self) -> None:\n expected = Fixtures.next_table()\n expected.description = '\"hello!\" said no one'\n expected.tags.sort()\n\n self.get_proxy().put_table(table=expected)\n actual: Table = self.get_proxy().get_table(table_uri=checkNotNone(expected.key))\n actual.last_updated_timestamp = None\n actual.tags.sort()\n\n self.assertEqual(expected, actual)", "def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)", "def _first_page_or_table(attr):\n return bool(get_page(attr) or attr.sentence.is_tabular())", "def go_home(node):\n if node.attr('t').isSettable():\n node.setAttr('t', (0, 0, 0))\n if node.attr('r').isSettable():\n node.setAttr('r', (0, 0, 0))\n if node.attr('s').isSettable():\n node.setAttr('s', (1, 1, 1))", "def catch_tabs(self):\n lnum = 1\n for line in self.text:\n cnum = line.find(\"\\t\")\n if 0 <= cnum:\n self.errmsg(\"TAB detected in input. Please use spaces.\",\n pos=(lnum,cnum))\n lnum += 1", "def processJumpTable(jt_ea):", "def atHead(self):\n return self.cursor == self.head", "def test_tabs():\n for _, modname, ispkg in walk_packages(mne.__path__, prefix=\"mne.\"):\n # because we don't import e.g. mne.tests w/mne\n if not ispkg and modname not in tab_ignores:\n try:\n mod = importlib.import_module(modname)\n except Exception: # e.g., mne.export not having pybv\n continue\n source = inspect.getsource(mod)\n assert \"\\t\" not in source, (\n '\"%s\" has tabs, please remove them '\n \"or add it to the ignore list\" % modname\n )", "def extract_ks_tab(name):\n if not name:\n return None, None\n\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower(), table.lower()", "def start_table(self):\n raise NotImplementedError", "def _extract_ks_tab(name):\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower().encode('UTF8'), table.lower().encode('UTF8')", "def peek_table (db, name):\n count = '''SELECT COUNT (*) FROM {table}'''.format (table=name)\n display (pandas.read_sql_query (count, db))\n peek = '''SELECT * FROM {table} LIMIT 5'''.format (table=name)\n display (pandas.read_sql_query (peek, db))", "def find_table(input_file):\n contents = open(input_file, 'r').readlines()\n title = []\n for line in contents:\n if 'CREATE TABLE' in line:\n T = re.search('CREATE TABLE (.+?) 
\\(',line).group(1).strip('\\\"')\n title.append(T)\n if len(title) != 0:\n return True, title\n else:\n return False, title", "def has_table(self, name: str) -> bool:\n try:\n self.execute(\"select * from {table} limit 1\", name)\n return True\n except sqlite3.OperationalError:\n return False", "def fromtab(args):\n p = OptionParser(fromtab.__doc__)\n p.set_sep(sep=None)\n p.add_option(\n \"--noheader\", default=False, action=\"store_true\", help=\"Ignore first line\"\n )\n p.add_option(\"--replace\", help=\"Replace spaces in name to char\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n tabfile, fastafile = args\n sep = opts.sep\n replace = opts.replace\n fp = must_open(tabfile)\n fw = must_open(fastafile, \"w\")\n nseq = 0\n if opts.noheader:\n next(fp)\n for row in fp:\n row = row.strip()\n if not row or row[0] == \"#\":\n continue\n\n name, seq = row.rsplit(sep, 1)\n if replace:\n name = name.replace(\" \", replace)\n print(\">{0}\\n{1}\".format(name, seq), file=fw)\n nseq += 1\n fw.close()\n\n logging.debug(\"A total of {0} sequences written to `{1}`.\".format(nseq, fastafile))", "def isTable(self, tableName):\n url = '%s/_table/%s' % (self.uri, tableName)\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data", "def write_tables(self):\n parse_dir, parse_mod = self._tables_location()\n # Using optimized = 0 force yacc to compare the parser signature to the\n # parse_tab signature and will update it if necessary.\n yacc.yacc(method='LALR',\n module=self,\n start='enaml',\n tabmodule=parse_mod,\n outputdir=parse_dir,\n optimize=0,\n debug=0,\n errorlog=yacc.NullLogger())", "def state_parsing_toplevel_text(self):\n chars = []\n while self.more():\n if self.peek() in CHAR_COMMENT:\n self.emit(TokenWhitespace, chars)\n return self.state_parsing_comment\n if self.peek() in CHAR_NUMBER:\n self.emit(TokenWhitespace, chars)\n return self.state_parsing_transaction\n if self.confirm_next(\"P\"):\n self.emit(TokenWhitespace, chars)\n return self.state_parsing_price\n if self.confirm_next(\"C\"):\n self.emit(TokenWhitespace, chars)\n return self.state_parsing_conversion\n if self.confirm_next(\"python\"):\n self.emit(TokenWhitespace, chars)\n return self.state_parsing_embedded_python\n if self.confirm_next(\"tag\"):\n self.emit(TokenWhitespace, chars)\n return self.state_parsing_embedded_tag\n if self.peek() not in CHAR_WHITESPACE + CHAR_ENTER:\n _, _, l2, c2 = self._coords()\n raise LexingError(\n \"unparsable data at line %d, char %d\" % (l2, c2)\n )\n chars += [next(self)]\n self.emit(TokenWhitespace, chars)\n return", "def test_parse_classic_otu_table(self):\r\n data = self.otu_table1\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n self.expected_lineages1)\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])\r\n\r\n\r\n # test that the modified parse_classic performs correctly on OTU tables\r\n # without leading comments\r\n data = self.otu_table_without_leading_comment\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n sams = ['let-7i', 'miR-7', 'miR-17n', 'miR-18a', 'miR-19a', 'miR-22',\r\n 'miR-25', 'miR-26a']\r\n otus = ['A2M', 'AAAS', 'AACS', 'AADACL1']\r\n vals = array([\r\n [-0.2, 0.03680505, 
0.205, 0.23, 0.66, 0.08, -0.373, 0.26],\r\n [-0.09, -0.25, 0.274, 0.15, 0.12, 0.29, 0.029, -0.1148452],\r\n [0.33, 0.19, 0.27, 0.28, 0.19, 0.25, 0.089, 0.14],\r\n [0.49, -0.92, -0.723, -0.23, 0.08, 0.49, -0.386, -0.64]])\r\n exp = (sams, otus, vals, []) # no lineages\r\n # because float comps in arrays always errors\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n self.assertEqual(obs[3], exp[3])\r\n self.assertTrue(all((obs[2] == exp[2]).tolist()))", "def has_table(self, table_name, timeout):\n _abstract()", "def has_table(self, table_name, timeout):\n _abstract()", "def _get_table_name(url):\n try:\n return urlparse(url).path.strip('/').split('/')[1]\n except IndexError:\n return None", "def parse(self):\n for index in range(len(self.columns)):\n if index in self.columns:\n self.parsed_table[self.columns[index].col_name_str] = []\n if not self.table.linked_pages:\n return self.create_empty_table()\n for data_chunk in self.table.linked_pages:\n original_data = data_chunk\n parsed_data = parse_data_page_header(original_data, version=self.version)\n\n last_offset = None\n for rec_offset in parsed_data.record_offsets:\n # Deleted row - Just skip it\n if rec_offset & 0x8000:\n last_offset = rec_offset & 0xfff\n continue\n # Overflow page\n if rec_offset & 0x4000:\n # overflow ptr is 4 bits flags, 12 bits ptr\n rec_ptr_offset = rec_offset & 0xfff\n # update last pointer to pointer without flags\n last_offset = rec_ptr_offset\n # The ptr is the offset in the current data page. we get a 4 byte record_pointer from that\n overflow_rec_ptr = original_data[rec_ptr_offset:rec_ptr_offset + 4]\n overflow_rec_ptr = struct.unpack(\"<I\", overflow_rec_ptr)[0]\n record = self._get_overflow_record(overflow_rec_ptr)\n if record:\n self._parse_row(record)\n continue\n # First record is actually the last one - from offset until the end of the data\n if not last_offset:\n record = original_data[rec_offset:]\n else:\n record = original_data[rec_offset:last_offset]\n last_offset = rec_offset\n if record:\n self._parse_row(record)\n return self.parsed_table", "def test_fast_tab_with_names(parallel, read_tab):\n content = \"\"\"#\n\\tdecDeg\\tRate_pn_offAxis\\tRate_mos2_offAxis\\tObsID\\tSourceID\\tRADeg\\tversion\\tCounts_pn\\tRate_pn\\trun\\tRate_mos1\\tRate_mos2\\tInserted_pn\\tInserted_mos2\\tbeta\\tRate_mos1_offAxis\\trcArcsec\\tname\\tInserted\\tCounts_mos1\\tInserted_mos1\\tCounts_mos2\\ty\\tx\\tCounts\\toffAxis\\tRot\n-3.007559\\t0.0000\\t0.0010\\t0013140201\\t0\\t213.462574\\t0\\t2\\t0.0002\\t0\\t0.0001\\t0.0001\\t0\\t1\\t0.66\\t0.0217\\t3.0\\tfakeXMMXCS J1413.8-0300\\t3\\t1\\t2\\t1\\t398.000\\t127.000\\t5\\t13.9\\t72.3\\t\"\"\"\n head = [f\"A{i}\" for i in range(28)]\n read_tab(content, data_start=1, parallel=parallel, names=head)", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def getTableByName(self, tablename):\n pass", "def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df", "def get_table_byname(self, aTable):\n if aTable in self._tablesObjects.keys():\n oTable = self._tablesObjects[aTable]\n else:\n oTable = None\n return oTable", "def done_parsing(self):\n # STUDENT\n return (self.input_buffer_len() == 1 ) and (self.stack_len()==1) \n # END STUDENT", "def 
regenerateTable():\n deleteAll()\n\n # Start generating records from start nodes, and continue generating\n # records for their children until either the bottom of the ANAD_PART_OF\n # tree is reached, or stop nodes are reached.\n\n for perspective in Perspectives.Iterator():\n perspectiveName = perspective.getName()\n starts = PerspectiveAmbits.getStartAmbitForPerspective(perspectiveName)\n stops = PerspectiveAmbits.getStopAmbitForPerspective(perspectiveName)\n startNodeOids = sets.Set(starts.keys())\n stopNodeOids = sets.Set(stops.keys())\n \n #print perspectiveName\n #print startNodeOids\n #print stopNodeOids\n \n startApos = [PartOfs.getPrimaryPathApoForNodeOid(nodeOid)\n for nodeOid in startNodeOids]\n apoList = startApos[:]\n\n while len(apoList) > 0:\n partOf = apoList.pop()\n\n # create POP record for this part of.\n \n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(partOf.getOid())\n pop.setIsAncestor(False)\n pop.setNodeOid(partOf.getNodeOid())\n pop.insert()\n \n #if partOf.getOid() == 68470:\n # print \n # print pop.getPerspectiveName()\n # print pop.getApoOid()\n # print pop.isAncestor()\n # print pop.getNodeOid()\n # print\n # print partOf.getOid()\n # print partOf.getSpecies()\n # print partOf.getNodeStartStageOid()\n # print partOf.getNodeEndStageOid()\n # print partOf.getPathStartStageOid()\n # print partOf.getPathEndStageOid()\n # print partOf.getNodeOid()\n # print partOf.getSequence()\n # print partOf.getDepth()\n # print partOf.getFullPathEmapas()\n # print partOf.getFullPath()\n # print partOf.isPrimaryPath()\n # print partOf.getParentApoOid()\n\n _addToKnowledge(pop)\n\n # if this is not a stop node, then add all its part-of kids\n # to the list of APOs to generate POP records for.\n if partOf.getNodeOid() not in stopNodeOids:\n apoList.extend(PartOfs.getByParentOid(partOf.getOid()))\n\n # for each start node, add any ancestor APOs that were not added\n # by the above process.\n ancesApos = sets.Set()\n for apo in startApos:\n parentApoOid = apo.getParentApoOid()\n if parentApoOid != None:\n parentApo = PartOfs.getByOid(parentApoOid)\n if (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None):\n ancesApos.add(parentApo)\n\n while len(ancesApos) > 0:\n ancesApo = ancesApos.pop()\n # create POP record for this ancestor\n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(ancesApo.getOid())\n pop.setIsAncestor(True)\n pop.setNodeOid(ancesApo.getNodeOid())\n pop.insert()\n _addToKnowledge(pop)\n\n # if this APO has a parent that hasn't yet been processed then\n # add it to list of ancestor APOs to generate records for.\n parentApoOid = ancesApo.getParentApoOid()\n if (parentApoOid != None and\n (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None)):\n parentApo = PartOfs.getByOid(parentApoOid)\n ancesApos.add(parentApo)\n \n \n \n return", "def test_parse_taxa_summary_table(self):\r\n actual = parse_taxa_summary_table(self.taxa_summary1.split('\\n'))\r\n self.assertItemsEqual(actual[0], self.taxa_summary1_expected[0])\r\n self.assertItemsEqual(actual[1], self.taxa_summary1_expected[1])\r\n assert_almost_equal(actual[2], self.taxa_summary1_expected[2])", "def test_parse_classic_otu_table_legacy(self):\r\n data = self.legacy_otu_table1\r\n data_f = (data.split('\\n'))\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', 
'1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n self.expected_lineages1)\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])", "def __call__(self):\n try:\n _ = self.engine.table_names()\n except OperationalError:\n return False\n else:\n return True", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def test_read_empty_basic_table_with_comments(fast_reader):\n dat = \"\"\"\n # comment 1\n # comment 2\n col1 col2\n \"\"\"\n t = ascii.read(dat, fast_reader=fast_reader)\n assert t.meta[\"comments\"] == [\"comment 1\", \"comment 2\"]\n assert len(t) == 0\n assert t.colnames == [\"col1\", \"col2\"]", "def _get_table(self):\n\t\treturn self._table", "def ofTable(self, tablename):\n return tablename == self._table.name", "def test_peek_shows_value_of_current_head(dq_3):\n assert dq_3.peek_left() == 'snine'", "def find_temp_tables(sql):\n lines = sql.split(\"\\n\")\n pattern = r\"^.*into\\s+#.*$\"\n r = re.compile(pattern, re.MULTILINE)\n\n tables=[]\n for match in r.findall(sql):\n table_start = match.find(\"#\")\n \n tables.append(match[table_start:])\n\n return tables", "def test_open_ped_tabs(self):\n \n self.temp.write('A\\tB\\t0\\t0\\t1\\t1\\n')\n self.temp.flush()\n families = open_ped(self.temp.name)\n \n fam = Family('A')\n fam.add_person(Person('A', 'B', '0', '0', '1', '1'))\n \n self.assertEqual(families[0].nodes, fam.nodes)", "def test_whitespace_before_comment(parallel, read_tab):\n text = \"a\\tb\\tc\\n # comment line\\n1\\t2\\t3\"\n table = read_tab(text, parallel=parallel)\n expected = Table([[1], [2], [3]], names=(\"a\", \"b\", \"c\"))\n assert_table_equal(table, expected)", "def next(self):\n self.curr_table += 1\n\n if self.curr_table == self.numtables:\n raise StopIteration()\n else:\n return self.getTableByIndex(self.curr_table)", "def peek(self):", "def check_table(cur, table: str) -> bool:\n table_data = cur.execute(f\"\"\"\n SELECT name \n FROM sqlite_master \n WHERE type='table' \n AND name='{table}'\n \"\"\")\n table_fetch = table_data.fetchall()\n if table_fetch:\n return True\n return False", "def next(self):\n lines = []\n query = False\n while 1:\n line = self._uhandle.readline()\n if not line:\n break\n # If I've reached the next one, then put the line back and stop.\n if lines and (line.startswith('BLAST')\n or line.startswith('BLAST', 1)\n or line.startswith('<?xml ')):\n self._uhandle.saveline(line)\n break\n # New style files ommit the BLAST line to mark a new query:\n if line.startswith(\"Query=\"):\n if not query:\n if not self._header:\n self._header = lines[:]\n query = True\n else:\n #Start of another record\n self._uhandle.saveline(line)\n break\n lines.append(line)\n\n if query and \"BLAST\" not in lines[0]:\n #Cheat and re-insert the header\n #print \"-\"*50\n #print \"\".join(self._header)\n #print \"-\"*50\n #print \"\".join(lines)\n #print \"-\"*50\n lines = self._header + lines\n \n if not lines:\n return None\n \n data = ''.join(lines)\n if self._parser is not None:\n return self._parser.parse(File.StringHandle(data))\n return data", "def table_parser(table_files, study, outdir, timepoint=None, dtype=\"wide\",\n auto_type=False):\n # Welcome\n print(\"Starting tables parsing...\")\n\n # Check inputs\n if dtype not in (\"wide\", \"long\"):\n raise ValueError(\"Unexpected data type '{0}'.\".format(dtype))\n\n # Parse all the tables\n 
tables = []\n with progressbar.ProgressBar(max_value=len(table_files),\n redirect_stdout=True) as bar:\n for cnt, path in enumerate(table_files):\n\n # Open the TSV table\n with open(path, \"rt\") as open_file:\n raw_table = open_file.readlines()\n header = raw_table[0].rstrip(\"\\n\").split(\"\\t\")\n table_content = []\n for row in raw_table[1:]:\n row = row.rstrip(\"\\n\").split(\"\\t\")\n if auto_type:\n raise NotImplementedError(\n \"The automatic typing of columns has not been yet \"\n \"implemented.\")\n table_content.append(row)\n\n # Generate the final structure\n table = {}\n qname = os.path.basename(path).replace(\".tsv\", \"\")\n center = DEFAULT_CENTER\n if timepoint is None:\n timepoint = DEFAULT_TIMEPOINT\n for row_cnt, row in enumerate(table_content):\n assessment_id = \"{0}_q{1}_{2}\".format(\n study.lower(), qname, timepoint)\n subject = row[0].replace(\"sub-\", \"\")\n if dtype == \"wide\":\n assessment_id = \"{0}_{1}\".format(\n assessment_id, row_cnt + 1)\n assessment_id = \"{0}_{1}\".format(assessment_id, subject)\n\n # Create assessment structure\n assessment_struct = {\n \"identifier\": assessment_id,\n \"timepoint\": timepoint}\n\n # Build the subject questionnaires structure for this timepoint\n subj_questionnaires = {\n \"Questionnaires\": OrderedDict(),\n \"Assessment\": assessment_struct\n }\n\n # Fill the questionnaire structure\n qdata = OrderedDict()\n for question, answer in zip(header, row):\n question = question.decode(\"utf-8\", \"ignore\").encode(\n \"utf-8\")\n answer = answer.decode(\"utf-8\", \"ignore\").encode(\"utf-8\")\n qdata[question] = answer\n subj_questionnaires[\"Questionnaires\"][qname] = qdata\n\n # Add this questionnaire to the patient data\n if center not in table:\n table[center] = {}\n if subject not in table[center]:\n table[center][subject] = []\n table[center][subject].append(subj_questionnaires)\n\n # Saving result\n save_parsing(table, outdir, study, \"tables-{0}\".format(qname))\n tables.extend(glob.glob(\n os.path.join(outdir, \"tables-{0}*.json\".format(qname))))\n\n # Update progress bar\n bar.update(cnt)\n\n # Goodbye\n print(\"Done.\")\n\n return tables", "def table_name(self) -> str:\n return \"OLTP\"", "def peek(self):\n pass", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def _peek(self):\n return self.token_list[self._current]", "def get_table_info(line):\n\n COMMENT_EXPR = '-- Name: '\n TYPE_EXPR = '; Type: '\n SCHEMA_EXPR = '; Schema: '\n OWNER_EXPR = '; Owner: '\n TABLESPACE_EXPR = '; Tablespace: '\n\n temp = line.strip('\\n')\n type_start = get_all_occurrences(TYPE_EXPR, temp)\n schema_start = get_all_occurrences(SCHEMA_EXPR, temp)\n owner_start = get_all_occurrences(OWNER_EXPR, temp)\n tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)\n if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:\n return (None, None, None, None)\n name = temp[len(COMMENT_EXPR) : type_start[0]]\n type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]\n schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]\n if not tblspace_start:\n tblspace_start.append(None)\n owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]\n return (name, type, schema, owner)", "def peek(self):\n pass", "def identifyTableEntry(line):\n 
matches = re.findall('<td>', line)\n if len(matches) > 0:\n return True", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def parse_tables_xlsx(inp):\n # --------------------------------------------------------------------------\n # Start\n # --------------------------------------------------------------------------\n raw_read = pd.read_excel(inp,sheet_name = None)\n indx = get_tab_index(raw_read)\n # --------------------------------------------------------------------------\n # Get the individual tables from the file\n # --------------------------------------------------------------------------\n tabdict = {}\n for i in indx['tab'].to_list():\n tabdict[i] = get_table_df(raw_read[i])\n # --------------------------------------------------------------------------\n # Finish\n # --------------------------------------------------------------------------\n out = {}\n out['indx'] = indx\n out['tabs'] = tabdict\n return out", "def is_abbr(self, index):\n if self.get_prev_word(index, orignal=True).lower() in self.abbr:\n return True\n else:\n return False", "def _tables(self):\n tabs = set()\n for cond in self._andalso + self._orelse:\n tabs = tabs.union(cond._tables())\n return tabs", "def table_is_empty(self):\n check_query = QSqlQuery(self.reader_connection)\n query = f'SELECT * FROM {self.table_name}'\n check_query.exec(query)\n empty_table = check_query.first()\n check_query.finish()\n return not empty_table", "def extract_tables(node):\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set", "def clean_table(self):\n return False", "def test_make_otu_table_no_taxonomy(self):\r\n otu_map_lines = \"\"\"0\tABC_0\tDEF_1\r\n1\tABC_1\r\nx\tGHI_2\tGHI_3\tGHI_77\r\nz\tDEF_3\tXYZ_1\"\"\".split('\\n')\r\n obs = make_otu_table(otu_map_lines, constructor=DenseOTUTable)\r\n exp = \"\"\"{\"rows\": [{\"id\": \"0\", \"metadata\": null}, {\"id\": \"1\", \"metadata\": null}, {\"id\": \"x\", \"metadata\": null}, {\"id\": \"z\", \"metadata\": null}], \"format\": \"Biological Observation Matrix 0.9dev\", \"data\": [[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 3, 0], [0, 1, 0, 1]], \"columns\": [{\"id\": \"ABC\", \"metadata\": null}, {\"id\": \"DEF\", \"metadata\": null}, {\"id\": \"GHI\", \"metadata\": null}, {\"id\": \"XYZ\", \"metadata\": null}], \"generated_by\": \"QIIME 1.4.0-dev, svn revision 2532\", \"matrix_type\": \"dense\", \"shape\": [4, 4], \"format_url\": \"http://biom-format.org\", \"date\": \"2011-12-21T00:49:15.978315\", \"type\": \"OTU table\", \"id\": null, \"matrix_element_type\": \"float\"}\"\"\"\r\n self.assertEqual(\r\n parse_biom_table(obs.split('\\n')),\r\n parse_biom_table(exp.split('\\n')))", "def test_table_name(self):\n obs = PrepTemplate._table_name(1)\n self.assertEqual(obs, \"prep_1\")", "def table_check(tablename, path):\n instance = arcno(path)\n tablelist = [i for i,j in instance.actual_list.items()]\n return True if tablename in tablelist else False", "def swap_tables(self):\n if self.stop_before_swap:\n return True\n log.info(\"== Stage 6: Swap table ==\")\n self.stop_slave_sql()\n self.execute_sql(sql.set_session_variable(\"autocommit\"), (0,))\n self.start_transaction()\n stage_start_time = time.time()\n self.lock_tables((self.new_table_name, 
self.table_name, self.delta_table_name))\n log.info(\"Final round of replay before swap table\")\n self.checksum_required_for_replay = False\n self.replay_changes(single_trx=True, holding_locks=True)\n # We will not run delta checksum here, because there will be an error\n # like this, if we run a nested query using `NOT EXISTS`:\n # SQL execution error: [1100] Table 't' was not locked with LOCK TABLES\n if self.mysql_version.is_mysql8:\n # mysql 8.0 supports atomic rename inside WRITE locks\n self.execute_sql(\n sql.rename_all_tables(\n orig_name=self.table_name,\n old_name=self.renamed_table_name,\n new_name=self.new_table_name,\n )\n )\n self.table_swapped = True\n self.add_drop_table_entry(self.renamed_table_name)\n log.info(\n \"Renamed {} TO {}, {} TO {}\".format(\n self.table_name,\n self.renamed_table_name,\n self.new_table_name,\n self.table_name,\n )\n )\n else:\n self.execute_sql(sql.rename_table(self.table_name, self.renamed_table_name))\n log.info(\n \"Renamed {} TO {}\".format(self.table_name, self.renamed_table_name)\n )\n self.table_swapped = True\n self.add_drop_table_entry(self.renamed_table_name)\n self.execute_sql(sql.rename_table(self.new_table_name, self.table_name))\n log.info(\"Renamed {} TO {}\".format(self.new_table_name, self.table_name))\n\n log.info(\"Table has successfully swapped, new schema takes effect now\")\n self._cleanup_payload.remove_drop_table_entry(\n self._current_db, self.new_table_name\n )\n self.commit()\n self.unlock_tables()\n self.stats[\"time_in_lock\"] = self.stats.setdefault(\"time_in_lock\", 0) + (\n time.time() - stage_start_time\n )\n self.execute_sql(sql.set_session_variable(\"autocommit\"), (1,))\n self.start_slave_sql()\n self.stats[\"swap_table_progress\"] = \"Swap table finishes\"", "def init_table_obj(self):\n # Check the existence of original table\n if not self.table_exists(self.table_name):\n raise OSCError(\n \"TABLE_NOT_EXIST\", {\"db\": self._current_db, \"table\": self.table_name}\n )\n self._old_table = self.fetch_table_schema(self.table_name)\n self.partitions[self.table_name] = self.fetch_partitions(self.table_name)\n # The table after swap will have the same partition layout as current\n # table\n self.partitions[self.renamed_table_name] = self.partitions[self.table_name]\n # Preserve the auto_inc value from old table, so that we don't revert\n # back to a smaller value after OSC\n if self._old_table.auto_increment:\n self._new_table.auto_increment = self._old_table.auto_increment\n # We don't change the storage engine in OSC, so just use\n # the fetched instance storage engine\n self._new_table.engine = self._old_table.engine\n # Populate both old and new tables with explicit charset/collate\n self.populate_charset_collation(self._old_table)\n self.populate_charset_collation(self._new_table)", "def _write_tables(cls):\n path = inspect.getfile(cls)\n parent = os.path.split(path)[0]\n # Need to change directories to get the file written at the right\n # location.\n cwd = os.getcwd()\n os.chdir(parent)\n tabname = cls._table_name('lex', relative=True)\n lex.lex(object=cls, lextab=tabname, optimize=True, debug=False)\n tabname = cls._table_name('tab', relative=True)\n yacc.yacc(module=cls, tabmodule=tabname, optimize=True, debug=False)\n os.chdir(cwd)", "def presence_absence(table: biom.Table) -> biom.Table:\n table.pa(inplace=True)\n return table", "def create_meta_loan_table(self):\n table_exists = self.check_if_table_exists(\"meta_loan_tables\")\n\n if not table_exists:\n 
self.read_sql_from_file('create_meta_loan_tables.sql')\n return", "def _find_table(name):\n tables = Base.metadata.tables\n table = tables.get(name, None)\n if table is not None:\n return table\n else:\n raise NameError('Unable to locate table: %s' % name)", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def peek(self) -> str:\n return self.head.info", "def test_fkey_nav_stops_on_skip(dumper, db):\n dumper.reader.load_db(\n db.create_sample(\n 5,\n fkeys=[\n (\"table1\", \"t2id\", \"table2\", \"id\"),\n (\"table2\", \"t3id\", \"table3\", \"id\"),\n (\"table3\", \"t4id\", \"table4\", \"id\"),\n ],\n )\n )\n dumper.add_config(\n {\n \"db_objects\": [\n {\"name\": \"table1\"},\n {\"name\": \"table3\", \"action\": \"skip\"},\n ]\n }\n )\n dumper.perform_dump()\n objs = [obj for obj, match in dumper.writer.dumped if isinstance(obj, Table)]\n assert len(objs) == 2", "def test_default_data_start(parallel, read_basic):\n text = \"ignore this line\\na b c\\n1 2 3\\n4 5 6\"\n table = read_basic(text, header_start=1, parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=(\"a\", \"b\", \"c\"))\n assert_table_equal(table, expected)", "def check_table(self, table_name: str) -> bool:\n try:\n if self.engine.dialect.has_table(self.engine.connect(), table_name):\n return self.get_input(table_name)\n return False\n except Exception as err:\n logger.error(\"check_table [error] -> %s\" % err)\n return False", "def is_file_ingested(self, original_name, tablename):\n prep_stmt = self.session.prepare(\n 'SELECT * FROM {0} WHERE {1}=?'.format(tablename, COLUMNS_META[2])\n )\n bound = prep_stmt.bind([original_name])\n results = self.session.execute(bound)\n return True if len(results.current_rows) > 0 else False", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def _is_begin_of_label(self, prev_top: str, now_top: str, prev_type: str, now_type: str) -> bool:\n\n if now_top in ['B', 'S']:\n return True\n elif now_top != 'O' and prev_type and prev_type != now_type:\n return True\n return False", "def test_abbreviate_miss():\n statement = \"PEEK(1234)\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"PEEK(1234)\"\n statement = \"QUIT\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"QUIT\"\n statement = \"ENDPRO\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"ENDPRO\"\n statement = \"POSITIOM\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"POSITIOM\"", "def parse(self, lines):\n # Keep count of the current line number.\n i = 0\n # list tables and content\n tables = dict()\n attr_param = list()\n\n skipped_lines = list() # DEBUG\n\n # Loop through all lines.\n for i in range(0, len(lines)):\n line_stripped = lineNormalise(lines[i])\n skip = True\n\n for keyword in self.target_keywords:\n\n # Look for keywords at the beginning of the line.\n if line_stripped.startswith(keyword):\n # print(\"{} : {}\".format(i, line_stripped)) # DEBUG\n skip = False\n\n # Found one, do parse\n expression = re.search(r'(\\w+) (\\w+)', line_stripped)\n if keyword is self.target_keywords[0]: # class/table\n # get table name\n 
table_name = expression.group(2)\n\n # add it in tables if not already in\n # tables (classes) may be at differant place in a PlantUML file\n if table_name not in tables:\n tables[table_name] = list()\n # print(\"Table : «{}» ajoutee\".format(expression.group(2))) # DEBUG\n print(\"{} : +table «{}»\".format(i, table_name)) # DEBUG\n\n elif keyword is self.target_keywords[1]: # primary key\n # import pdb; pdb.set_trace()\n # get related table\n attr_param = (re.sub(r'(pyk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n elif keyword is self.target_keywords[2]: # foreign key\n # get related table\n attr_param = (re.sub(r'(fnk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n\n elif keyword is self. target_keywords[3]: # primary foreign key\n # get related table\n attr_param = (re.sub(r'(pfk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n else: # attribute\n # print(line_stripped) # DEBUG\n print(\"{} : \\t«{}» Attribute? {}\".format(i, line_stripped)) # DEBUG\n\n if skip:\n skipped_lines.append(i)\n\n print(\"\\nNumbers of tables : {}\\n\".format(len(tables)))\n pp = pprint.PrettyPrinter(indent=4, compact=True)\n print(\"Scraped data:\")\n pp.pprint(tables) # DEBUG\n print(\"\\nSkipped lines: {}\\n\".format(skipped_lines)) # DEBUG", "def should_set_tablename(cls):\n if (\n cls.__dict__.get('__abstract__', False)\n or not any(isinstance(b, BaseDeclarativeMeta) for b in cls.__mro__[1:])\n ):\n return False\n\n for base in cls.__mro__:\n if '__tablename__' not in base.__dict__:\n continue\n\n if isinstance(base.__dict__['__tablename__'], declared_attr):\n return False\n\n return not (\n base is cls\n or base.__dict__.get('__abstract__', False)\n or not isinstance(base, BaseDeclarativeMeta)\n )\n\n return True", "def home(self):\n while self.document.characters[self.position-1].character != '\\n':\n self.position -= 1\n if self.position == 0:\n # Got to beginning of file before newline\n break", "def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]:\n try:\n schema_name = self.context.database_schema.name.__root__\n if self.source_config.includeTables:\n for table_and_type in self.query_table_names_and_types(schema_name):\n table_name = self.standardize_table_name(\n schema_name, table_and_type.name\n )\n table_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=table_name,\n skip_es_search=True,\n )\n if filter_by_table(\n self.source_config.tableFilterPattern,\n table_fqn\n if self.source_config.useFqnForFiltering\n else table_name,\n ):\n self.status.filter(\n table_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield table_name, table_and_type.type_\n\n if self.source_config.includeViews:\n for view_name in self.inspector.get_view_names(schema_name):\n view_name = self.standardize_table_name(schema_name, view_name)\n view_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n 
schema_name=self.context.database_schema.name.__root__,\n table_name=view_name,\n )\n\n if filter_by_table(\n self.source_config.tableFilterPattern,\n view_fqn\n if self.source_config.useFqnForFiltering\n else view_name,\n ):\n self.status.filter(\n view_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield view_name, TableType.View\n except Exception as err:\n logger.warning(\n f\"Fetching tables names failed for schema {schema_name} due to - {err}\"\n )\n logger.debug(traceback.format_exc())", "def test_dump_load_round_trip(self, tablename):\n\n # copy fits file to the temp directory\n self.copy_file(tablename)\n\n datafile = self.temp(\"data.txt\")\n cdfile = self.temp(\"coldefs.txt\")\n hfile = self.temp(\"header.txt\")\n fits.tabledump(self.temp(tablename), datafile, cdfile, hfile)\n\n new_tbhdu = fits.tableload(datafile, cdfile, hfile)\n\n with fits.open(self.temp(tablename)) as hdul:\n _assert_attr_col(new_tbhdu, hdul[1])", "def start_table(self, name, style_name):\n raise NotImplementedError", "def peek(self):\n try:\n t = self.items[self.pos]\n except IndexError:\n raise EOF()\n return t", "def get_table_partition_details( # pylint: disable=unused-argument\n self,\n table_name: str,\n schema_name: str,\n inspector: Inspector,\n ) -> Tuple[bool, Optional[TablePartition]]:\n return False, None # By default the table will be a Regular Table", "def is_toc(self, par):\n return \"toc\" in par.attrs.get(\"class\", [])", "def check_table(schemaname=settings.DEFAULT_SCHEMA, tablename=settings.STATES):\n\n conn = None\n cur = None\n\n try:\n\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT to_regclass('%s.%s');\"\"\", (AsIs(schemaname), AsIs(tablename)))\n result = cur.fetchone()[0]\n\n return (True if result else False)\n\n except Exception as e:\n raise Exception(e)\n\n finally:\n if conn: conn = None\n if cur: cur = None", "def _get_table(self, cursor):\n raise NotImplementedError", "def test_read_tab(parallel, read_tab):\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = '1\\t2\\t3\\n a\\t b \\t\\n c\\t\" d\\n e\"\\t '\n table = read_tab(text, parallel=parallel)\n assert_equal(table[\"1\"][0], \" a\") # preserve line whitespace\n assert_equal(table[\"2\"][0], \" b \") # preserve field whitespace\n assert table[\"3\"][0] is ma.masked # empty value should be masked\n assert_equal(table[\"2\"][1], \" d\\n e\") # preserve whitespace in quoted fields\n assert_equal(table[\"3\"][1], \" \") # preserve end-of-line whitespace", "def parse_table_schema(conn):\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"PRAGMA table_info({})\".format(\"week5\"))\r\n print(cur.fetchall())" ]
[ "0.5321881", "0.5258151", "0.5246666", "0.51772755", "0.5133962", "0.4889261", "0.47756714", "0.4769611", "0.4711629", "0.46457544", "0.45855626", "0.45844787", "0.45794702", "0.4570339", "0.4569376", "0.456038", "0.45316866", "0.45240548", "0.4521015", "0.45061612", "0.44987288", "0.44811767", "0.44659045", "0.44091237", "0.4408919", "0.44010186", "0.4384943", "0.43826067", "0.43607122", "0.43607122", "0.43543887", "0.4347756", "0.43471885", "0.43425924", "0.4334272", "0.43124354", "0.4300054", "0.42834428", "0.42736763", "0.42660525", "0.42648122", "0.42532703", "0.4248312", "0.42460674", "0.42388654", "0.4237418", "0.42301327", "0.4228592", "0.42235744", "0.42147127", "0.4211975", "0.42035607", "0.4203239", "0.4193344", "0.41917428", "0.41880527", "0.41765967", "0.41761607", "0.41658866", "0.41608658", "0.41553968", "0.41503683", "0.414553", "0.4143288", "0.41356453", "0.41261724", "0.4121564", "0.41199267", "0.41178373", "0.41126537", "0.41114837", "0.41106313", "0.40959096", "0.4086594", "0.40849543", "0.4076092", "0.40743002", "0.40724713", "0.4069645", "0.40680918", "0.40664977", "0.40638673", "0.4063555", "0.40583315", "0.405295", "0.40513408", "0.40503478", "0.4050088", "0.40500537", "0.40478688", "0.4045788", "0.40455672", "0.40441418", "0.40432394", "0.40382522", "0.4037427", "0.4033575", "0.40303618", "0.40289727", "0.40286967" ]
0.7672022
0
Parses all siblings of the provided table first and bundles them into an AoT.
def _parse_aot(self, first, name_first):  # type: (Item, str) -> Item\n    payload = [first]\n    self._aot_stack.append(name_first)\n    while not self.end():\n        is_aot_next, name_next = self._peek_table()\n        if is_aot_next and name_next == name_first:\n            _, table = self._parse_table()\n            payload.append(table)\n        else:\n            break\n\n    self._aot_stack.pop()\n\n    return AoT(payload)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_table_to_xmltree_sub(node):\n # Split text into Token nodes\n # NOTE: very basic token splitting here... (to run through CoreNLP?)\n if node.text is not None:\n for tok in re.split(r'\\s+', node.text):\n node.append(et.Element('token', attrib={'word':tok}))\n \n # Recursively append children\n for c in node:\n node.append(html_table_to_xmltree_sub(c))\n return node", "def parse_table_in_rows(self, table):\n parsed_table = []\n for tr in table.find_elements_by_tag_name('tr'):\n parsed_table.append(tr)\n return parsed_table", "def process_table(soup):\n rests = []\n rows = soup.find('table',attrs={\"id\" : \"rest_list\"}).find_all('tr')\n for r in rows[1:len(rows)]:\n rdict = rest_row(r)\n if rdict:\n rests.append(rdict)\n return rests", "def _parse_table(self): # type: (Optional[str]) -> Tuple[Key, Item]\n indent = self.extract()\n self.inc() # Skip opening bracket\n\n is_aot = False\n if self._current == \"[\":\n if not self.inc():\n raise self.parse_error(UnexpectedEofError)\n\n is_aot = True\n\n # Key\n self.mark()\n while self._current != \"]\" and self.inc():\n pass\n\n name = self.extract()\n key = Key(name, sep=\"\")\n\n self.inc() # Skip closing bracket\n if is_aot:\n # TODO: Verify close bracket\n self.inc()\n\n cws, comment, trail = self._parse_comment_trail()\n\n result = Null()\n values = Container()\n\n while not self.end():\n item = self._parse_item()\n if item:\n _key, item = item\n if not self._merge_ws(item, values):\n values.append(_key, item)\n else:\n if self._current == \"[\":\n _, name_next = self._peek_table()\n\n if self._is_child(name, name_next):\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n\n # Picking up any sibling\n while not self.end():\n _, name_next = self._peek_table()\n\n if not self._is_child(name, name_next):\n break\n\n key_next, table_next = self._parse_table()\n key_next = Key(key_next.key[len(name + \".\") :])\n\n values.append(key_next, table_next)\n else:\n table = Table(\n values, Trivia(indent, cws, comment, trail), is_aot\n )\n\n result = table\n if is_aot and (\n not self._aot_stack or name != self._aot_stack[-1]\n ):\n result = self._parse_aot(table, name)\n\n break\n else:\n raise self.parse_error(\n InternalParserError,\n (\"_parse_item() returned None on a non-bracket character.\"),\n )\n\n if isinstance(result, Null):\n result = Table(values, Trivia(indent, cws, comment, trail), is_aot)\n\n return key, result", "def iter_trios(self):\n for row in self._ped_tab.itertuples():\n if row.father == '0':\n continue\n if row.mother == '0':\n continue\n yield (row.sample_id, row.father, row.mother)", "def extract_tables(node):\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set", "def get_table_entries(self, soup, table_id):\r\n table_soup = soup.find(id=table_id).find_parent('tr')\r\n tr_list = table_soup.find_next_siblings('tr')\r\n\r\n bundle_contents = []\r\n for row in tr_list:\r\n # normally, this is a name template\r\n name_template_sibling = row.find(id='nametemplate')\r\n\r\n # if its a quality item do this\r\n bundle_quality_sibling = row.find(id='qualitycontainersm')\r\n\r\n if name_template_sibling is not None:\r\n bundle_link = 
name_template_sibling.find('a')\r\n bundle_contents.append(bundle_link.attrs['title'])\r\n elif bundle_quality_sibling is not None:\r\n bundle_quality_item = bundle_quality_sibling.find_parent('td')\r\n bundle_td = bundle_quality_item.find_next_sibling('td')\r\n # strip out link from td text\r\n bundle_contents.append(bundle_td.text.strip())\r\n\r\n return bundle_contents", "def parse_soup(self, table):\n rows = table.find_all('tr')\n list_of_lists = list()\n time = pd.Timestamp('now')\n for row in rows:\n row_list = list()\n row_list.append(time)\n for td in row.find_all('td')[1:]:\n row_list.append(td.text)\n if td('a'):\n for a in td('a'):\n if a.get('href'):\n m = re.search('teamId\\=(\\d+)', a.get('href'))\n if m:\n row_list.append(m.group(1))\n list_of_lists.append(row_list)\n return [[y for y in x if y] for x in list_of_lists[3:]]", "def expand(table):\n t = []\n for r in table:\n for _ in range(r[1]):\n try:\n t.append((r[0], r[2]))\n except IndexError:\n t.append((r[0], None))\n return t", "def getSiblings():", "def parse_table(table):\n rows = table.find_all('tr')\n if not rows:\n raise ValueError(\"No rows for table\")\n pages = []\n table_tag = \"<table>\"\n tbl_headers = get_tbl_headers(rows)\n table_tag += \"<tr>\"\n for header in tbl_headers.keys():\n table_tag += conf.ADD_TH_TAG(header)\n table_tag += \"</tr>\"\n for row in rows:\n cols = row.find_all('td')\n if not cols:\n continue\n for page_name in cols[0].find_all('a'):\n if not page_name:\n continue\n pages.append(page_name.text)\n table_tag += '<tr>'\n for header, col in tbl_headers.items():\n try:\n table_tag += f\"<td>{preprocess_data(f'{header} : {cols[col].text}')} \\t</td>\"\n except IndexError:\n pass\n table_tag += '</tr>'\n table_tag += '</table>'\n if conf.DOWNLOAD_IMAGES:\n download_images(pages)\n return table_tag", "def add_table(self,table):\n \n attr_list = []\n name = None\n comment = \"\"\n new_table = None\n t_id = table.attrib['id']\n \n \n #first cycles the child elements to find table name\n #and creates a table object\n for child in table:\n #table name\n if child.attrib['name'] == 'name':\n name = super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n elif child.attrib['name'] == 'comment':\n comment = super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n \n #check if table name found and table created\n if not name == None:\n new_table = db_table.Table(name,t_id,comment)\n \n else: ###\n self.err.print_error(\"dia:table_name_missing\") ###\n e_code = self.err.exit_code[\"xml\"] ###\n ###\n exit(e_code) ###\n \n \n #then cycles again to find the other relevant child elements\n for child in table:\n \n if child.attrib['name'] == 'attributes': \n new_root = child\n \n for child in new_root:\n new_attr = self.parse_attribute(child,new_table)\n attr_list.append(new_attr)\n \n if new_attr.p_key_flag:\n new_table.p_key.append(new_attr)\n \n new_table.attr_list = attr_list\n self.table_dict[t_id] = new_table\n \n return", "def adapt_htable(oHtable):\n\n # Starting the summary\n summary = \"\"\n # Get any text at this level\n if 'txt' in oHtable:\n if 'type' in oHtable and oHtable['type'] != 'Star' and oHtable['type'] != 'Zero':\n if summary != \"\": summary += \" \"\n summary += oHtable['txt']\n # Walk all the [child] tables\n if 'child' in oHtable:\n # Do this depth-first\n for chTable in oHtable['child']:\n # First go down\n oBack = adapt_htable(chTable)\n # Add the summary\n if 'summary' in oBack:\n if summary != \"\": summary += \" \"\n summary += oBack['summary']\n # Then set the 
summary link\n oHtable['summary'] = summary\n\n # Return the result\n return oHtable", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table Title'])\n else:\n tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def find_children(start_tag, tag_table):\n pure_child = pd.Series([])\n parents = pd.Series([start_tag])\n while parents.shape[0] > 0:\n pure_child = pd.concat([pure_child,\n parents[~parents\n .isin(tag_table['Parent'])]])\n parents = tag_table.loc[tag_table['Parent']\n .isin(parents[parents\n .isin(tag_table['Parent'])]),\n 'Child']\n return pure_child", "def table_parsing(self):\n table_count=0\n if self.table: \n for tebil in self.table:\n json_list=[]\n try:\n table_caption = wtp.parse(str(tebil)).tables[0].caption\n table_folder_name=remove_markup(str(table_caption))\n table_folder_name=table_folder_name.lower()\n table_folder_name=table_folder_name.strip()\n except Exception as e:\n print('Exception: table folder name or out of list in table', str(e))\n continue \n if table_caption:\n try:\n self.revision_page_folder_path=os.path.join(self.rd_folder_path_table,self.page_folder)\n if not os.path.exists(self.revision_page_folder_path):\n os.mkdir(self.revision_page_folder_path)\n table_folder_name=table_folder_name.strip('\\n')\n revision_table_folder_path=os.path.join(self.revision_page_folder_path,table_folder_name)\n revision_table_folder_path=revision_table_folder_path.strip()\n if not os.path.exists(revision_table_folder_path):\n os.mkdir(revision_table_folder_path)\n except Exception as e:\n print('Exception: revision table folder', str(e))\n continue\n table_count=table_count+1\n json_list.append(str(tebil))\n json.dump(json_list, open(os.path.join(revision_table_folder_path, self.revision_id_parent + '_' + self.revision_id_current + \".json\"), \"w\"))\n print('Table caption: ', table_folder_name)\n table_count=table_count+1 \n return table_count", "def _get_all_table_elements(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n cell_text = cell.text\n if \"VIEW\" in cell_text:\n yield (cell.get_attribute(\"href\"), cell_text)\n else:\n yield cell_text", "def repeat_tables(primer, table, dependencies, ignore_header=True):\n knowledge_map = learn(primer, dependencies)\n completed = []\n for row in table:\n # copy everything over\n completed_row = row\n for dvcol, ivcol in 
dependencies.items():\n iv = row[ivcol]\n # if the value is empty and we know what to put\n if row[dvcol] == \"\" and iv in knowledge_map[dvcol]:\n # fill in what we learned\n completed_row[dvcol] = knowledge_map[dvcol][iv]\n completed.append(completed_row)\n return completed", "def parse(self):\n for index in range(len(self.columns)):\n if index in self.columns:\n self.parsed_table[self.columns[index].col_name_str] = []\n if not self.table.linked_pages:\n return self.create_empty_table()\n for data_chunk in self.table.linked_pages:\n original_data = data_chunk\n parsed_data = parse_data_page_header(original_data, version=self.version)\n\n last_offset = None\n for rec_offset in parsed_data.record_offsets:\n # Deleted row - Just skip it\n if rec_offset & 0x8000:\n last_offset = rec_offset & 0xfff\n continue\n # Overflow page\n if rec_offset & 0x4000:\n # overflow ptr is 4 bits flags, 12 bits ptr\n rec_ptr_offset = rec_offset & 0xfff\n # update last pointer to pointer without flags\n last_offset = rec_ptr_offset\n # The ptr is the offset in the current data page. we get a 4 byte record_pointer from that\n overflow_rec_ptr = original_data[rec_ptr_offset:rec_ptr_offset + 4]\n overflow_rec_ptr = struct.unpack(\"<I\", overflow_rec_ptr)[0]\n record = self._get_overflow_record(overflow_rec_ptr)\n if record:\n self._parse_row(record)\n continue\n # First record is actually the last one - from offset until the end of the data\n if not last_offset:\n record = original_data[rec_offset:]\n else:\n record = original_data[rec_offset:last_offset]\n last_offset = rec_offset\n if record:\n self._parse_row(record)\n return self.parsed_table", "def _finalize_splittable_nodes(self):\n while len(self.splittable_nodes) > 0:\n node = self.splittable_nodes.pop()\n self._finalize_leaf(node)", "def getTransitions(tree, root, transitionTable):\n\n for transition in root.iter('transition'):\n # iter(), searches recursively over all sub-trees\n for path in transition.iter('from'):\n From = path.text\n\n for path in transition.iter('to'):\n To = path.text\n\n for path in transition.iter('read'):\n # None is considered Epsilon\n if path.text == None:\n Symbol = 'Epsilon'\n else:\n Symbol = path.text\n\n table = {From:[[To, Symbol]]}\n\n if From in transitionTable.keys():\n # Check if an entry already exists for that node\n transitionTable[From].append([To, Symbol])\n else:\n # Make new entry\n transitionTable.update(table)\n \n return transitionTable", "def algT(ts):\n parent = np.array([-1]*len(ts.tables.nodes),\n dtype=np.int32)\n\n # The length of the genome\n maxpos = ts.get_sequence_length()\n i, o = 0, 0\n current_left = 0.0\n num_edges = len(ts.tables.edges)\n\n inorder, outorder = index_edge_table(ts)\n\n while i < num_edges or current_left < maxpos:\n # Remove parents from the tree\n while o < len(outorder) and \\\n outorder[o].position == current_left:\n p = outorder[o].parent\n c = outorder[o].child\n parent[c] = -1\n o += 1\n # Add parents to the tree\n while i < num_edges and \\\n inorder[i].position == current_left:\n p = inorder[i].parent\n c = inorder[i].child\n parent[c] = p\n i += 1\n # Get the right edge of the current tree\n right = maxpos\n if i < num_edges:\n right = min(right, inorder[i].position)\n if o < num_edges:\n right = min(right, outorder[o].position)\n # Send the current state of the tree\n # back to the calling environment\n yield current_left, parent\n\n # Update the left edge for the next\n # iteration through\n current_left = right", "def siblings(self, u):\n if u == 
self.virtual_root:\n return tuple()\n parent = self.parent(u)\n if self.is_root(u):\n parent = self.virtual_root\n if parent != tskit.NULL:\n return tuple(v for v in self.children(parent) if u != v)\n return tuple()", "def parse_single_table(source, **kwargs):\n if kwargs.get(\"table_number\") is None:\n kwargs[\"table_number\"] = 0\n\n votable = parse(source, **kwargs)\n\n return votable.get_first_table()", "def update(ts):\n tables = ts.dump_tables()\n update_tables(tables)\n return tables.tree_sequence()", "async def iterate_and_pass(table_sub: pxapi.TableSub) -> None:\n async for _ in table_sub:\n pass", "def partition_ethosu_by_table(mod, pattern_table):\n mod = relay.transform.InferType()(mod)\n mod = mod = codegen.replicate_pads(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.MergeComposite(pattern_table)(mod)\n mod = relay.transform.AnnotateTarget(\"ethos-u\")(mod)\n mod = relay.transform.MergeCompilerRegions()(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.PartitionGraph()(mod)\n mod = relay.transform.InferType()(mod)\n mod = preprocess.preprocess_ext_io()(mod)\n return mod", "def transform_single_table_def(self, node: Tree) -> Table:\n assert node.data == 'table' and len(node.children) == 2\n\n table_name: Token = node.children[0]\n fields_node: Tree = node.children[1]\n\n # Check that all field names are unique\n collected = set()\n for field_name in fields_node.children:\n if field_name in collected:\n raise SyntaxError(\n f\"duplicated field name {str(field_name)!r} \"\n f\"at line {field_name.line} column {field_name.column}\",\n )\n collected.add(field_name)\n\n return Table(table_name, tuple(fields_node.children))", "def get_table_row_values(self):\n tag_items = self.soup.find_all(\"tr\")\n table_rows = []\n for tag_item in tag_items:\n tag_child_item_values = tag_item.find_all(\"td\")\n tag_item_child_values = []\n for tag_child_item_value in tag_child_item_values:\n tag_item_child_values.append(tag_child_item_value.text.strip())\n table_rows.append(tag_item_child_values)\n return table_rows", "def fill_table(self, executer, tree, cursor, table):\n counter = 0\n table_content = executer.lots_of_eggs(cursor, table)\n for line in table_content:\n tree.insert('', 'end', text=counter, values=line)\n counter += 1", "def inline_single_starrable(self):\n\n # Map a rule name to the phrase it should be replaced with.\n replacement = dict()\n\n # Process descendants first\n for A in reversed(self.preorder()):\n A_rule = self.rules[A].as_container()\n if len(A_rule) == 1:\n option = A_rule[0].as_container()\n if len(option) == 1:\n first = option[0]\n if first.is_symbol_name():\n first_name = first.content\n if self.rules[first_name].as_starred(first_name) is not None:\n replacement[A] = [first]\n\n # Update this rule with any scheduled replacements.\n changed_rule = False\n new_options = []\n for option in A_rule:\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.extend(replacement[x.content])\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[A] = self.MakeChoice(new_options)\n\n self.remove_unused_rules()", "def update_table(self):\r\n self.cursor.execute(\"\"\"SELECT * FROM transactions\"\"\")\r\n result = self.cursor.fetchall()\r\n self.tree.delete(*self.tree.get_children())\r\n for item in result:\r\n self.tree.insert('', 'end', 
text=item[0], values=item[1:])", "def parse_edges(source, strict=True, table=None):\n sep = None\n if strict:\n sep = \"\\t\"\n if table is None:\n table = tables.EdgeTable()\n header = source.readline().rstrip(\"\\n\").split(sep)\n left_index = header.index(\"left\")\n right_index = header.index(\"right\")\n parent_index = header.index(\"parent\")\n children_index = header.index(\"child\")\n for line in source:\n tokens = line.rstrip(\"\\n\").split(sep)\n if len(tokens) >= 4:\n left = float(tokens[left_index])\n right = float(tokens[right_index])\n parent = int(tokens[parent_index])\n children = tuple(map(int, tokens[children_index].split(\",\")))\n for child in children:\n table.add_row(left=left, right=right, parent=parent, child=child)\n return table", "def parse(self, response):\n \n # Get table of data\n table_class = \"sortable responsive default\"\n table = response.xpath(f'//*[@class=\"{table_class}\"]')\n \n if table:\n # Get column headers, stripping ascending/descending markers\n columns = get_text_of_matching_elements(table, './/th', re_str='[▲|▼]')\n\n rows = table.xpath('.//tbody/tr')\n for row in rows:\n # Get value for each column\n td = get_text_of_matching_elements(row, './/td')\n fields = dict(zip(columns, td))\n \n # Create item with data dictionary\n item = Entry(state_name=self.state_name,\n timestamp=datetime.now(),\n fields=fields)\n \n # Follow link on each entry to get more detailed information,\n # updating the original item before yielding it to the pipelines\n entry_href = row.xpath('td[1]/*/@href').get()\n if entry_href:\n entry_link = urljoin(self.base_url, entry_href)\n yield scrapy.Request(entry_link, callback=self.parse_details, cb_kwargs={\"item\": item})\n else:\n logging.warning(f\"Expecting a link table entry to contain a link; {self.state_name} may need to update xpath selector. 
Yielding partial Entry\")\n yield item\n else:\n logging.error(f\"No table found for {self.state_name}; may need to updated xpath selector\")\n yield\n\n # Do the same for the next page, if any\n next_link = response.xpath(\"//a[contains(@class, 'next_page')]/@href\").get()\n\n if next_link is not None:\n self.page_count += 1\n yield response.follow(next_link, self.parse)\n else:\n logging.info(f\"Downloaded {self.page_count} pages of results for {self.state_name}\")", "def extract_tables(resp):\n if 'Blocks' not in resp.keys():\n return(None)\n tables = list(filter(lambda x: x['BlockType'] == 'TABLE', resp['Blocks']))\n blockmap = {}\n for b in resp['Blocks']:\n blockmap[b['Id']] = b\n\n t_unwrap = []\n for t in tables:\n \n # cells = list(filter(lambda x: x['BlockType'] == 'CELL', resp['Blocks']))\n cell_rels = t['Relationships']\n cell_child_rels = list(filter(lambda x: x['Type'] == 'CHILD', cell_rels))\n cell_ids = []\n for cr in cell_child_rels:\n cell_ids = cell_ids + cr['Ids']\n\n cells = [blockmap[c_id] for c_id in cell_ids]\n max_row_index = max([x['RowIndex'] for x in cells])\n min_row_index = min([x['RowIndex'] for x in cells])\n max_col_index = max([x['ColumnIndex'] for x in cells])\n min_col_index = min([x['ColumnIndex'] for x in cells]) \n\n df = pd.DataFrame(np.empty((max_row_index,max_col_index),dtype=object))\n for cell in cells:\n if 'Relationships' in cell.keys():\n rels = cell['Relationships']\n child_rels = list(filter(lambda x: x['Type'] == 'CHILD', rels))\n cell_words = []\n for c in child_rels:\n chs = c['Ids']\n for child_id in chs:\n bmx = blockmap[child_id]\n if bmx['BlockType'] == 'WORD':\n cell_words.append(bmx['Text'])\n\n pd_row = cell['RowIndex'] - 1\n pd_col = cell['ColumnIndex'] - 1 \n df.loc[pd_row, pd_col] = \" \".join(cell_words)\n \n t_unwrap.append(df)\n return(t_unwrap)", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def importXmlToTable(table):\n request = \"DELETE FROM %s\" % table\n execute(request)\n elms = doc.getElementsByTagName(\"text\")\n for el in elms:\n data = parseTextElm(el)\n if data == None: continue\n textstring, audiouri, xmlid, textflag, audioflag = data\n if el.parentNode.tagName == \"accelerator\":\n keys = \"\\\"%s\\\"\" % el.parentNode.getAttribute(\"keys\")\n else:\n keys=\"NULL\"\n \n request = \"\"\"INSERT INTO %(table)s (textstring, textflag, audioflag, audiouri, xmlid, actualkeys) \n VALUES (\\\"%(textstring)s\\\", %(textflag)d, %(audioflag)d, \\\"%(audiouri)s\\\", \\\"%(xmlid)s\\\", %(keys)s)\"\"\" \\\n % {\"table\": table, \"textstring\": textstring, \"textflag\": textflag, \"audioflag\": audioflag, \"audiouri\": audiouri, \n \"xmlid\": xmlid, \"keys\": keys}\n execute(request)\n \n setRoles(table)\n findMnemonicGroups(table)\n findAcceleratorTargets(table)\n return", "def table_row(data, tpat=re.compile('(<(?P<sl>/)?(?P<tag>t[rdh]|table)[^>]*>)', re.I)):\n data = re.sub(r\"[\\r\\n]+\", \" \", data)\n data = re.sub(r\"(?i)\\s*<(/?)td[^>]*>\\s*\", r\"<\\1td>\", data)\n data = re.sub(r\"(?i)\\s*<(/?)th[^>]*>\\s*\", r\"<\\1th>\", data)\n data = re.sub(r\"(?i)\\s*<tr[^>]*>\\s*\", \"\\n<tr>\", data)\n data = re.sub(r\"(?i)\\s*</tr[^>]*>\\s*\", \"</tr>\\n\", data)\n data = re.sub(r\"(?i)\\s*<(/?)table[^>]*>\\s*\", r\"\\n<\\1table>\\n\", data)\n 
return data", "def parseTable(chart):\n rowelems = chart.find_all('tr')\n rows = [rowelem.find_all('td') for rowelem in rowelems]\n data = [[elem.get_text() for elem in row] for row in rows]\n return(data)", "def _merge_table_data(self, first_page):\n table = self._table_defs.get(first_page * self.page_size)\n parsed_header = TDEF_HEADER.parse(table)\n data = table[parsed_header.header_end:]\n while parsed_header.next_page_ptr:\n table = self._table_defs.get(parsed_header.next_page_ptr * self.page_size)\n parsed_header = TDEF_HEADER.parse(table)\n data = data + table[parsed_header.header_end:]\n return data", "def parse(self, lines):\n # Keep count of the current line number.\n i = 0\n # list tables and content\n tables = dict()\n attr_param = list()\n\n skipped_lines = list() # DEBUG\n\n # Loop through all lines.\n for i in range(0, len(lines)):\n line_stripped = lineNormalise(lines[i])\n skip = True\n\n for keyword in self.target_keywords:\n\n # Look for keywords at the beginning of the line.\n if line_stripped.startswith(keyword):\n # print(\"{} : {}\".format(i, line_stripped)) # DEBUG\n skip = False\n\n # Found one, do parse\n expression = re.search(r'(\\w+) (\\w+)', line_stripped)\n if keyword is self.target_keywords[0]: # class/table\n # get table name\n table_name = expression.group(2)\n\n # add it in tables if not already in\n # tables (classes) may be at differant place in a PlantUML file\n if table_name not in tables:\n tables[table_name] = list()\n # print(\"Table : «{}» ajoutee\".format(expression.group(2))) # DEBUG\n print(\"{} : +table «{}»\".format(i, table_name)) # DEBUG\n\n elif keyword is self.target_keywords[1]: # primary key\n # import pdb; pdb.set_trace()\n # get related table\n attr_param = (re.sub(r'(pyk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n elif keyword is self.target_keywords[2]: # foreign key\n # get related table\n attr_param = (re.sub(r'(fnk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n\n elif keyword is self. target_keywords[3]: # primary foreign key\n # get related table\n attr_param = (re.sub(r'(pfk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n else: # attribute\n # print(line_stripped) # DEBUG\n print(\"{} : \\t«{}» Attribute? 
{}\".format(i, line_stripped)) # DEBUG\n\n if skip:\n skipped_lines.append(i)\n\n print(\"\\nNumbers of tables : {}\\n\".format(len(tables)))\n pp = pprint.PrettyPrinter(indent=4, compact=True)\n print(\"Scraped data:\")\n pp.pprint(tables) # DEBUG\n print(\"\\nSkipped lines: {}\\n\".format(skipped_lines)) # DEBUG", "def write_table(self, tab):\n self.save_text()\n\n table = list()\n row = list()\n headers = tab['c'][3]\n if headers:\n has_content = False\n for col in headers:\n self.list_parse(col, cell_content=True)\n cell_content = self.get_content()\n row.append(cell_content)\n if cell_content != '':\n has_content = True\n if has_content:\n row = tuple(row)\n table.append(row)\n t_content = tab['c'][4]\n for line in t_content:\n row = list()\n for col in line:\n self.list_parse(col, cell_content=True)\n cell_content = self.get_content()\n row.append(cell_content)\n row = tuple(row)\n table.append(row)\n table = tuple(table)\n self.tables.append((table, (self.context, self.ancestor)))", "def get_wrapped_table(self):\n assert self.is_table_wrapper\n for child in self.children:\n if isinstance(child, TableBox):\n return child\n else: # pragma: no cover\n raise ValueError('Table wrapper without a table')", "def into(self, table):\n self._tables.set(table)\n return self", "def copy_table_after(table, paragraph):\n\n\ttbl, p = table._tbl, paragraph._p\n\tnew_tbl = deepcopy(tbl)\n\tp.addnext(new_tbl)", "def _table_tree(self, real_account):\n return [{\n 'account': ra.account,\n 'balances_children':\n serialize_inventory(realization.compute_balance(ra),\n at_cost=True),\n 'balances': serialize_inventory(ra.balance, at_cost=True),\n 'is_leaf': len(ra) == 0 or bool(ra.txn_postings),\n 'postings_count': len(ra.txn_postings)\n } for ra in realization.iter_children(real_account)]", "def get_hnodes(self,h):\n t_nodes = self.get_h(h)\n for t_node in t_nodes:\n t_node = self.tree.get_node(t_node)\n self.check_childs(t_node.identifier)", "def _preprocess(self):\n # A 2D table storing all possible queries.\n self._table = {}\n\n # Build the table using bottom-up dynamic programming.\n for p in breadth_first_traversal(self._tree):\n self._table[p.index()] = [p]\n\n l = 0\n while (l < self._tree.depth(p)):\n u = self._table[p.index()][l]\n w = self._tree.parent(u)\n self._table[p.index()].append(w)\n l += 1", "def html_table_to_xmltree(html):\n node = et.fromstring(re.sub(r'>\\s+<', '><', html.strip()))\n xml = html_table_to_xmltree_sub(node)\n return XMLTree(xml)", "async def table_data(graph, root, root_id):\n seen = set()\n while len(seen) < len(graph):\n found_something = False\n for child, info in graph.items():\n if child in seen:\n continue\n if set(info[\"fks\"]).difference({child}) <= seen:\n # I originally wrote this so that tables would be yielded in an order that ensured any related tables\n # and data would have already been copied. 
Not sure this is necessary anymore, since FK constraints\n # are not copied as part of CREATE TABLE LIKE.\n seen.add(child)\n found_something = True\n pk = list(info[\"pks\"].keys())[0]\n joins = find_joins(child, root, graph)\n if joins:\n parts = [\"SELECT {}.* FROM {}\".format(child, child)]\n last = child\n for parent, from_col, to_col in joins:\n parts.append(\n \"JOIN {table} ON {on}\".format(\n table=parent, on=\"{}.{} = {}.{}\".format(last, from_col, parent, to_col)\n )\n )\n last = parent\n parts.append(\"WHERE {}.{} = {}\".format(root, pk, root_id))\n yield child, \" \".join(parts), True\n elif child == root:\n yield child, \"SELECT * FROM {} WHERE {} = {}\".format(root, pk, root_id), True\n else:\n yield child, \"SELECT * FROM {}\".format(child), False\n if not found_something:\n print(\"Deadlock detected!\", file=sys.stderr, flush=True)\n sys.exit(1)", "def _parse_table(res, key_index, value_index):\n data = OrderedDict()\n for sel in res.xpath('//tr'):\n columns = sel.xpath('td')\n if len(columns) == value_index+1:\n key = ''.join(columns[key_index].xpath('.//text()').extract())\n key = base.helpers.slugify(key.strip())\n value = ''.join(columns[value_index].xpath('.//text()').extract())\n value = value.strip()\n if key and value:\n data[key] = value\n return data", "def parse_table(soup, start_gen, end_gen):\n pokes = []\n for cell in soup.find_all(\"td\", attrs={'style': None}):\n for name in cell.find_all(\"a\"):\n pokes.append(name.string)\n\n start_index = pokes.index(GEN_STARTS_WITH[start_gen])\n end_index = pokes.index(GEN_ENDS_WITH[end_gen]) + 1\n\n # Doesn't have to be ordered, just personal preference.\n unique_list = OrderedSet(pokes[start_index:end_index])\n\n if start_gen != end_gen:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} to {end_gen} were fetched.\")\n else:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} were fetched.\")\n\n pkmn_string = ', '.join(unique_list)\n\n for key, value in NIDORAN_CASE.items():\n # Handling of Nidoran male/female symbols.\n pkmn_string = pkmn_string.replace(key, value)\n\n return pkmn_string", "def merge_tables(self):\r\n\r\n table_params = pd.read_json(os.path.join(self.config_path, self.db_config_file),\r\n orient='records')[self.report_type]['table']\r\n\r\n self.trees = create_tree(table_params)\r\n\r\n for tree in self.trees:\r\n self._recursive_merge(tree=tree)\r\n self.merged_table.append(self._get_table(self.master_table[0]))\r\n\r\n # Drop table from list_of_tables, table_indexes and master_table\r\n del self.list_of_tables[self.table_indexes.index(self.master_table[0])]\r\n self.table_indexes.remove(self.master_table[0])\r\n del self.master_table[0]", "def tabulate(self, table=None):\n if not table:\n raise TypeError('tabulate needs a Table object')\n table.tabulate(dataset=self, from_dataset=True)\n table_record = table.create_history_record()\n self.representations.append(table_record)\n self._append_task(kind='representation', task=table_record)\n return table", "def test_split_otu_table_on_sample_metadata(self):\r\n actual = list(split_otu_table_on_sample_metadata(self.otu_table_f1,\r\n self.mapping_f1,\r\n \"Treatment\"))\r\n for id_, e in actual:\r\n try:\r\n parse_biom_table(e)\r\n except:\r\n print e\r\n actual = [(id_, parse_biom_table(e)) for id_, e in actual]\r\n exp = [(id_, parse_biom_table(e)) for id_, e in otu_table_exp1]\r\n\r\n actual.sort()\r\n exp.sort()\r\n\r\n for a, e in zip(actual, exp):\r\n self.assertEqual(a, e, \"OTU tables are not equal:\\n%s\\n%s\" %\r\n 
(format_biom_table(a[1]), format_biom_table(e[1])))", "def iter_rows(node, style, extra=()):\n yield Row.from_node(node, style, extra)\n children = node.children\n if children:\n last_idx = len(children) - 1\n for idx, child in enumerate(children):\n yield from iter_rows(child, style, extra + (idx != last_idx,))", "def test_getSiblings(self):\n previous, nextious = self.resolver.getSiblings(\n textId=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\", subreference=\"1.1\"\n )\n self.assertEqual(\n previous, \"1.pr\",\n \"Previous should be well computed\"\n )\n self.assertEqual(\n nextious, \"1.2\",\n \"Previous should be well computed\"\n )", "def fetch_table_tags(\n self,\n table_name: str,\n schema_name: str,\n inspector: Inspector,\n ) -> None:", "def grow(self):\n while self.splittable_nodes:\n self.split_next()", "def visit_table(self, sytable):\n def index(sytable):\n try:\n return sytable.get_column(self.input_index)\n except:\n return np.arange(sytable.number_of_rows())\n\n def slices_using_group_array(group_array):\n \"\"\"Return the slices to split by.\n A group array is made of strictly increasing group identifiers.\n\n >>> slices_using_group_array(np.array([0, 0, 0, 1, 1, 2, 3, 3, 3]))\n [(0, 3), (3, 5), (5, 6), (6, 9)]\n \"\"\"\n unique_elements = np.unique(group_array)\n slices = []\n for unique_element in unique_elements:\n indexes = np.flatnonzero(group_array == unique_element)\n low, high = (indexes[0], indexes[-1] + 1)\n slices.append((unique_element, slice(low, high)))\n return slices\n\n def indices_using_group_array(group_array):\n \"\"\"\n Return list of index lists, ordered by first occurance of value.\n \"\"\"\n unique_elements = np.unique(group_array)\n indices = []\n for unique_element in unique_elements:\n indices.append((unique_element,\n np.flatnonzero(group_array == unique_element)))\n return indices\n\n columns = sytable.columns()\n # Perform the split and append the new tables to output.\n slice_indices = indices_using_group_array(index(sytable))\n column_attrs = {}\n\n for unique_element, slice_index in slice_indices:\n # Sets of all columns except for the INDEX columns.\n result = type(sytable)(sytable.container_type)\n self.output_list.append((unique_element, result))\n\n for column in columns:\n array = sytable.get_column(column)[slice_index]\n if self.remove_fill and len(array):\n kind = array.dtype.kind\n if kind in ['S', 'U']:\n if np.all(array == ''):\n continue\n else:\n if not len(array) or np.isnan(np.min(array)):\n continue\n\n result.set_column(column, array)\n if column in column_attrs:\n attrs = column_attrs[column]\n else:\n attrs = dict(\n sytable.get_column_attributes(column).get())\n column_attrs[column] = attrs\n result.get_column_attributes(column).set(attrs)", "def nlp_tree(self, t, s=0):\n if t is not []:\n for word in t:\n # Ignore symbols & punctuation\n if word.pos_ != 'PUNCT' and word.pos_ != 'SYM':\n yield word.lemma_, s\n yield from self.nlp_tree(word.children, s + 1)", "def family(self):\r\n\r\n yield self\r\n for sibling in self.siblings():\r\n yield sibling", "def iter_siblings(self, axis: Optional[str] = None) -> Iterator[ChildNodeType]:\n if not isinstance(self.item, XPathNode) or self.item is self.root:\n return\n\n parent = self.item.parent\n if parent is None:\n return\n\n item = self.item\n status = self.item, self.axis\n self.axis = axis or 'following-sibling'\n\n if axis == 'preceding-sibling':\n for child in parent: # pragma: no cover\n if child is item:\n break\n self.item = child\n yield child\n else:\n follows = 
False\n for child in parent:\n if follows:\n self.item = child\n yield child\n elif child is item:\n follows = True\n self.item, self.axis = status", "def extract(soup):\r\n table = soup.find('div', id='dnn_ctr11396_TimeTableView_PlaceHolder').find('table')\r\n rows = table.findChildren('tr', recursive=False)\r\n return [[col.findAll('div', {'class': 'TTLesson'}) for col in row.findChildren('td', recursive=False)[1:]]\r\n for row in rows[1:]]", "def sprout_leaves(t, vals):", "def set_parent_table(self, table):\n self.__parent_table = table", "def sort_table(table, sats_table):", "def test_find_and_classify_node_children(self):\n\n this_node_table = skeleton_lines._find_and_classify_node_children(\n node_table=NODE_TABLE_SANS_CHILDREN,\n triangle_to_new_edge_table=TRIANGLE_TO_NEW_EDGE_TABLE,\n triangle_to_node_table=TRIANGLE_TO_NODE_TABLE)\n\n self.assertTrue(_compare_tables(\n NODE_TABLE_WITH_CHILDREN, this_node_table))", "def parse(self):\n \n root = self.xml_tree.getroot()\n \n #run for creating tables\n for child in root[1]:\n if child.attrib['type'] == 'Database - Table':\n self.add_table(child)\n \n \n #if table_dict empty -> wrong type of dia diagram\n if self.table_dict == {}: ###\n self.err.print_error(\"parser:database_wrong_dia\") ###\n e_code = self.err.exit_code[\"parser\"] ###\n ###\n exit(e_code) ###\n \n \n #run for adding references\n for child in root[1]:\n if child.attrib['type'] == 'Database - Reference':\n self.add_reference(child)\n \n return", "def traverse(self, prt):\n lst = []\n for e in self.items:\n if(e is not None):\n prt(e[1])\n lst.append(e[1])\n return lst", "def update_tables(tables):\n # First we ensure we can find the file format version number\n # in top-level metadata. Then we proceed to fix up the tables as necessary.\n if not (isinstance(tables.metadata, dict) and 'SLiM' in tables.metadata):\n # Old versions kept information in provenance, not top-level metadata.\n # Note this uses defaults on keys not present in provenance,\n # which prior to 0.5 was everything but generation and model_type.\n values = default_slim_metadata('tree_sequence')['SLiM']\n prov = None\n file_version = 'unknown'\n # use only the last SLiM provenance\n for p in tables.provenances:\n is_slim, this_file_version = slim_provenance_version(p) \n if is_slim:\n prov = p\n file_version = this_file_version\n values['file_version'] = file_version\n try:\n record = json.loads(prov.record)\n if file_version == \"0.1\":\n values['model_type'] = record['model_type']\n values['tick'] = record['generation']\n values['cycle'] = record['generation']\n else:\n if 'generation' in record['slim']:\n values['tick'] = record['slim']['generation']\n values['cycle'] = record['slim']['generation']\n for k in values:\n if k in record['parameters']:\n values[k] = record['parameters'][k]\n if k in record['slim']:\n values[k] = record['slim'][k]\n except:\n raise ValueError(\"Failed to obtain metadata from provenance.\")\n set_tree_sequence_metadata(tables, **values)\n\n file_version = tables.metadata['SLiM']['file_version']\n if file_version != slim_file_version:\n warnings.warn(\"This is a version {} SLiM tree sequence.\".format(file_version) +\n \" When you write this out, \" +\n \"it will be converted to version {}.\".format(slim_file_version))\n\n # the only tables to have metadata schema changed thus far\n # are populations, individuals, mutations, and top-level:\n old_schema = _old_metadata_schema(\"tree_sequence\", file_version)\n if old_schema is not None:\n md = tables.metadata\n new_schema = 
slim_metadata_schemas[\"tree_sequence\"]\n new_properties = new_schema.asdict()['properties']['SLiM']['required']\n tables.metadata_schema = new_schema\n defaults = default_slim_metadata(\"tree_sequence\")\n for k in new_properties:\n if k not in md['SLiM']:\n if k == \"tick\":\n md['SLiM']['tick'] = md['SLiM']['generation']\n md['SLiM']['cycle'] = md['SLiM']['generation']\n else:\n md['SLiM'][k] = defaults['SLiM'][k]\n tables.metadata = md\n\n old_schema = _old_metadata_schema(\"population\", file_version)\n if old_schema is not None:\n pops = tables.populations.copy()\n tables.populations.clear()\n if pops.metadata_schema == tskit.MetadataSchema(None):\n pops.metadata_schema = old_schema\n new_schema = slim_metadata_schemas[\"population\"]\n tables.populations.metadata_schema = new_schema\n defaults = default_slim_metadata(\"population\")\n # just needs recoding\n for pop in pops:\n tables.populations.append(pop)\n\n old_schema = _old_metadata_schema(\"individual\", file_version)\n if old_schema is not None:\n inds = tables.individuals.copy()\n tables.individuals.clear()\n if inds.metadata_schema == tskit.MetadataSchema(None):\n inds.metadata_schema = old_schema\n new_schema = slim_metadata_schemas[\"individual\"]\n tables.individuals.metadata_schema = new_schema\n defaults = default_slim_metadata(\"individual\")\n d = {}\n for k in [\"pedigree_p1\", \"pedigree_p2\"]:\n d[k] = defaults[k]\n for ind in inds:\n md = ind.metadata\n md.update(d)\n tables.individuals.append(ind.replace(metadata=md))\n\n old_schema = _old_metadata_schema(\"mutation\", file_version)\n if old_schema is not None:\n muts = tables.mutations.copy()\n tables.mutations.clear()\n if muts.metadata_schema == tskit.MetadataSchema(None):\n muts.metadata_schema = old_schema\n tables.mutations.metadata_schema = slim_metadata_schemas[\"mutation\"]\n for mut in muts:\n md = mut.metadata\n for ml in md['mutation_list']:\n ml['nucleotide'] = -1\n tables.mutations.append(mut.replace(metadata=md))\n\n if file_version == \"0.1\":\n # shift times\n slim_generation = tables.metadata['SLiM']['tick']\n node_times = tables.nodes.time + slim_generation\n tables.nodes.set_columns(\n flags=tables.nodes.flags,\n time=node_times,\n population=tables.nodes.population,\n individual=tables.nodes.individual,\n metadata=tables.nodes.metadata,\n metadata_offset=tables.nodes.metadata_offset)\n migration_times = tables.migrations.time + slim_generation\n tables.migrations.set_columns(\n left=tables.migrations.left,\n right=tables.migrations.right,\n node=tables.migrations.node,\n source=tables.migrations.source,\n dest=tables.migrations.dest,\n time=migration_times)\n\n new_record = {\n \"schema_version\": \"1.0.0\",\n \"software\": {\n \"name\": \"pyslim\",\n \"version\": pyslim_version,\n },\n \"parameters\": {\n \"command\": [\"updrade_tables\"],\n \"old_file_version\": file_version,\n \"new_file_version\": slim_file_version,\n },\n \"environment\": get_environment(),\n }\n tskit.validate_provenance(new_record)\n tables.provenances.add_row(json.dumps(new_record))\n\n set_metadata_schemas(tables)\n md = tables.metadata\n md['SLiM']['file_version'] = slim_file_version\n tables.metadata = md", "def treeparser(self,spot):\n\n\t\tspot_sub = self.spots[spot]\n\t\trootdir = spot_sub['rootdir']\n\t\t#---start with all files under rootdir\n\t\tfns = [os.path.join(dirpath,fn) \n\t\t\tfor (dirpath, dirnames, filenames) \n\t\t\tin os.walk(rootdir,followlinks=True) for fn in filenames]\n\t\t#---regex combinator is the only place where we enforce a naming 
convention via top,step,part\n\t\t#---note that we may wish to generalize this depending upon whether it is wise to have three parts\n\t\tregex = ('^%s\\/'%re.escape(rootdir.rstrip('/'))+\n\t\t\t'\\/'.join([spot_sub['top'],spot_sub['step'],spot_sub['part']])\n\t\t\t+'$')\n\t\tmatches_raw = [i.groups() for fn in fns for i in [re.search(regex,fn)] if i]\n\t\tif not matches_raw: \n\t\t\tstatus('no matches found for spot: \"%s,%s\"'%spot,tag='warning')\n\t\t\treturn\n\t\t#---first we organize the top,step,part into tuples which serve as keys\n\t\t#---we organize the toc as a doubly-nested dictionary of trajectory parts\n\t\t#---the top two levels of the toc correspond to the top and step signifiers\n\t\t#---note that this procedure projects the top,step,part naming convention into the toc\n\t\tmatches = [self.spots[spot]['divy_keys'](i) for i in matches_raw]\n\t\tself.toc[spot] = collections.OrderedDict()\n\t\t#---sort the tops into an ordered dictionary\n\t\tfor top in sorted(set(zip(*matches)[0])): \n\t\t\tself.toc[spot][top] = collections.OrderedDict()\n\t\t#---collect unique steps for each top and load them with the parts\n\t\tfor top in self.toc[spot]:\n\t\t\t#---sort the steps into an ordered dictionary\n\t\t\tfor step in sorted(set([i[1] for i in matches if i[0]==top])):\n\t\t\t\t#---we sort the parts into an ordered dictionary\n\t\t\t\t#---this is the leaf of the toc tree and we use dictionaries\n\t\t\t\tparts = sorted([i[2] for i in matches if i[0]==top and i[1]==step])\n\t\t\t\tself.toc[spot][top][step] = collections.OrderedDict([(part,{}) for part in parts])\n\t\t#---now the toc is prepared with filenames but subsequent parsings will identify EDR files", "def fromtables(pi, t, e):\n\n #sanity checks\n nStates=len(pi)\n assert(nStates==len(t) and nStates==len(e) and nStates>0)\n nObs=len(e[0])\n for i in range(nStates):\n assert(len(t[i])==nStates and len(e[i])==nObs)\n\n m=hmm(nStates, nObs)\n m.pi=deepcopy(pi)\n m.t=deepcopy(t)\n m.e=deepcopy(e)\n\n return m", "def visit(visitor: DocxTranslator, node: Node):\n assert isinstance(visitor, DocxTranslator)\n assert isinstance(node, Node)\n\n row = visitor.tables[-1][1]\n col = visitor.tables[-1][2]\n table = visitor.tables[-1][0]\n cell = table.cell(row, col)\n visitor.p_parents.append(cell)\n visitor.p = cell.paragraphs[0]", "def insert_table_after(table, paragraph):\n\n\ttbl, p = table._tbl, paragraph._p\n\tp.addnext(tbl)", "def uvozi_podatke(tabele):\n for t in tabele:\n t.uvozi()", "def initTable(self):\n sql = \"\"\" ( nodeId integer PRIMARY KEY,\n nextId integer,\n childId integer,\n label text);\n \"\"\"\n self.db.createTable(self.tableName + sql)\n # Reserve the first record as the head pointer, if it's not there\n found = self.db.selectById(self.tableName, 1)\n if not found:\n record = dict(nextId=None, childId=None, label='head pointer')\n self.db.insert(self.tableName, record)", "def siblings(self, siblings):\n\n self.logger.debug(\"In 'siblings' setter.\")\n\n self._siblings = siblings", "def learn_obo(self, obo):\n\n self.parents = [None] * len(self.row_names)\n for index, feature in enumerate(self.row_names):\n parent_names = obo.parents(feature)\n parent_indexes = [self.rows[_] for _ in parent_names]\n self.parents[index] = tuple(parent_indexes)", "def Rearrange(self, node):\n nnode = Node(node, \"%si\" % node.tag);\n nnode.children = node.children[1:];\n node.children[1:] = [nnode];", "def setup_table_for_epochs(table, timeseries, tag):\n table = table.copy()\n indices = np.searchsorted(timeseries.timestamps[:], 
table['start_time'].values)\n if len(indices > 0):\n diffs = np.concatenate([np.diff(indices), [table.shape[0] - indices[-1]]])\n else:\n diffs = []\n\n table['tags'] = [(tag,)] * table.shape[0]\n table['timeseries'] = [[[indices[ii], diffs[ii], timeseries]] for ii in range(table.shape[0])]\n return table", "def _traverse(p):\n # p[0] is the coordinate of this subtree (level + suffix)\n # p[1] is the start column of this subtree\n # p[2] is the end column of this subtree\n # p[3] is the subpart list\n # p[4] is the nonterminal or terminal/token at the head of this subtree\n col, option = p[0][0], p[0][1:] # Level of this subtree and option\n\n if not option:\n # No option: use a 'clean key' of NULL_TUPLE\n option = NULL_TUPLE\n else:\n # Convert list to a frozen (hashable) tuple\n option = tuple(option)\n\n while len(cols) <= col:\n # Add empty columns as required to reach this level\n cols.append(dict())\n\n # Add a tuple describing the rows spanned and the node info\n assert isinstance(p[4], Nonterminal) or isinstance(p[4], tuple)\n if option not in cols[col]:\n # Put in a dictionary entry for this option\n cols[col][option] = []\n cols[col][option].append((p[1], p[2], p[4]))\n\n # Navigate into subparts, if any\n if p[3]:\n for subpart in p[3]:\n _traverse(subpart)", "def split_node(node: saldag.OpNode):\n\n # Only dealing with single child case for now\n assert (len(node.children) <= 1)\n clone = copy.deepcopy(node)\n clone.out_rel.rename(node.out_rel.name + \"_obl\")\n clone.parents = set()\n clone.children = set()\n clone.is_mpc = True\n child = next(iter(node.children), None)\n saldag.insert_between(node, child, clone)", "def make_tree(self, l):\n\t\tfor el in l:\n\t\t\tself.insert(el)", "def test_getSiblings_nextOnly(self):\n previous, nextious = self.resolver.getSiblings(\n textId=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\", subreference=\"1.pr\"\n )\n self.assertEqual(\n previous, None,\n \"Previous Should not exist\"\n )\n self.assertEqual(\n nextious, \"1.1\",\n \"Next should be well computed\"\n )", "def split_on_whole_table(\n df: pyspark.DataFrame,\n ) -> pyspark.DataFrame:\n return df", "def extract_data(self, root, path, tag):\n data = []\n element = root.xpath(path)\n if element:\n url = self.PODEROPEDIA_BASE_URL + element[0].get('data-w2p_remote', None)\n if url:\n self.logger.debug('Querying {} from {}'.format(tag, url))\n try:\n response = self.session.get(url)\n response.raise_for_status()\n content = response.content\n html_tree = etree.HTML(content, parser=self.parser)\n if html_tree is None:\n return data\n rows = html_tree.xpath('.//*[starts-with(@id, \"collapse\")]/div/table/tr')\n for row in rows:\n target = target_name = target_path = relationship = None\n when = where = where_name = where_path = source = None\n row_id = row.get('id', '')\n cells = row.getchildren()\n idx = 0\n while idx < len(cells) - 1:\n try:\n cell_text = text_strip(cells[idx])\n except AttributeError:\n cell_text = ''\n sources = cells[idx].xpath('.//*[@class=\"fuente\"]')\n if len(sources) > 0:\n source = process_sources(cells[idx])\n elif cell_text == 'es' or cell_text == 'fue':\n when = cell_text\n idx = idx - 1\n target = cells[idx].find('a')\n if target is not None:\n target_path = target.get('href', None)\n target_name = text_strip(target)\n idx = idx + 2\n relationship = text_strip(cells[idx])\n elif cell_text == 'a' or cell_text == 'de':\n idx = idx - 1\n relationship = text_strip(cells[idx])\n idx = idx + 2\n target = cells[idx].find('a')\n if target is not None:\n 
target_path = target.get('href', None)\n target_name = text_strip(target)\n elif cell_text.startswith('desde'):\n when = cell_text\n elif 'es pasado' in cell_text:\n when = cell_text\n else:\n try:\n ignore = int(cell_text)\n when = cell_text\n except ValueError:\n potential_date = cell_text.split(' ')[0]\n try:\n ignore = datetime.strptime(potential_date, '%d-%m-%Y')\n when = cell_text\n except ValueError:\n try:\n ignore = datetime.strptime(potential_date, '%m-%Y')\n when = cell_text\n except ValueError:\n pass\n idx = idx + 1\n entry = {\n 'type': tag,\n 'target_path': target_path,\n 'relationship': relationship,\n 'when': when,\n 'where': where,\n 'source': source\n }\n data.append(entry)\n self.logger.debug('{}: {}'.format(tag, entry))\n except (requests.exceptions.HTTPError, etree.ParserError):\n self.logger.info('Something bad happened', exc_info=True)\n return data", "def updateSubhalos_old(host, file):\n f = open(file, 'r')\n line = f.readline()\n i = 0\n while line != '':\n if line[0:5] == \"#tree\":\n #if i%10000 == 0:\n #print 'subhalo finder scanned ', i, ' trees'\n i+=1\n num = int(line[6::])\n # Deal with a=0 halo independently\n line = f.readline()\n sub = MTH.MTHalo(line)\n if sub.pid == host.ID: # not upid. only subhalos, not subsub etc.\n #build tree, add to subhalo list of host\n tree = MT.MergerTree(file, num)\n tree.haloList.append(sub)\n if sub.num_prog ==0:\n tree.progenitors.append(sub)\n\n # Now deal with all other halos in the tree\n index = 1\n line = f.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = f.readline()\n index +=1\n # add a=1 subhalo to subhalo list of host (maybe should add tree?)\n host.subhalos.append(sub)\n else:\n line = f.readline()\n else:\n line = f.readline()\n f.close()", "def test_find_and_classify_nodes(self):\n\n this_node_table, this_triangle_to_node_table = (\n skeleton_lines._find_and_classify_nodes(\n polygon_object_xy=POLYGON_OBJECT_XY,\n new_edge_table=NEW_EDGE_TABLE,\n triangle_to_new_edge_table=TRIANGLE_TO_NEW_EDGE_TABLE,\n triangle_to_vertex_matrix=TRIANGLE_TO_VERTEX_MATRIX,\n end_node_vertex_indices=END_NODE_VERTEX_INDICES))\n\n self.assertTrue(_compare_tables(\n NODE_TABLE_SANS_CHILDREN, this_node_table))\n self.assertTrue(_compare_tables(\n TRIANGLE_TO_NODE_TABLE, this_triangle_to_node_table))", "def sit(self, table):\n self.table = table", "def parse_migration_tables(self, tabels_schema: MigrationTablesSchema):\n try:\n self.source_table = tabels_schema.migrationTable.SourceTable.dict()\n self.destination_table = tabels_schema.migrationTable.DestinationTable.dict()\n self.columns = tabels_schema.migrationTable.MigrationColumns\n except Exception as err:\n logger.error(\"parse_migration_tables [error] -> %s\" % err)", "def table_parser(table_files, study, outdir, timepoint=None, dtype=\"wide\",\n auto_type=False):\n # Welcome\n print(\"Starting tables parsing...\")\n\n # Check inputs\n if dtype not in (\"wide\", \"long\"):\n raise ValueError(\"Unexpected data type '{0}'.\".format(dtype))\n\n # Parse all the tables\n tables = []\n with progressbar.ProgressBar(max_value=len(table_files),\n redirect_stdout=True) as bar:\n for cnt, path in enumerate(table_files):\n\n # Open the TSV table\n with open(path, \"rt\") as open_file:\n raw_table = open_file.readlines()\n header = raw_table[0].rstrip(\"\\n\").split(\"\\t\")\n table_content = []\n for row in raw_table[1:]:\n row 
= row.rstrip(\"\\n\").split(\"\\t\")\n if auto_type:\n raise NotImplementedError(\n \"The automatic typing of columns has not been yet \"\n \"implemented.\")\n table_content.append(row)\n\n # Generate the final structure\n table = {}\n qname = os.path.basename(path).replace(\".tsv\", \"\")\n center = DEFAULT_CENTER\n if timepoint is None:\n timepoint = DEFAULT_TIMEPOINT\n for row_cnt, row in enumerate(table_content):\n assessment_id = \"{0}_q{1}_{2}\".format(\n study.lower(), qname, timepoint)\n subject = row[0].replace(\"sub-\", \"\")\n if dtype == \"wide\":\n assessment_id = \"{0}_{1}\".format(\n assessment_id, row_cnt + 1)\n assessment_id = \"{0}_{1}\".format(assessment_id, subject)\n\n # Create assessment structure\n assessment_struct = {\n \"identifier\": assessment_id,\n \"timepoint\": timepoint}\n\n # Build the subject questionnaires structure for this timepoint\n subj_questionnaires = {\n \"Questionnaires\": OrderedDict(),\n \"Assessment\": assessment_struct\n }\n\n # Fill the questionnaire structure\n qdata = OrderedDict()\n for question, answer in zip(header, row):\n question = question.decode(\"utf-8\", \"ignore\").encode(\n \"utf-8\")\n answer = answer.decode(\"utf-8\", \"ignore\").encode(\"utf-8\")\n qdata[question] = answer\n subj_questionnaires[\"Questionnaires\"][qname] = qdata\n\n # Add this questionnaire to the patient data\n if center not in table:\n table[center] = {}\n if subject not in table[center]:\n table[center][subject] = []\n table[center][subject].append(subj_questionnaires)\n\n # Saving result\n save_parsing(table, outdir, study, \"tables-{0}\".format(qname))\n tables.extend(glob.glob(\n os.path.join(outdir, \"tables-{0}*.json\".format(qname))))\n\n # Update progress bar\n bar.update(cnt)\n\n # Goodbye\n print(\"Done.\")\n\n return tables", "def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text", "def addTableRow(self, t):\r\n\r\n # Exits if no table was started\r\n assert self.tableHeader, \"No table was started\"\r\n\r\n # Retrieves table headers list depending of nesting level\r\n if self.subTableHeader != None:\r\n headers = self.subTableHeader\r\n indent = \" \"\r\n else:\r\n headers = self.tableHeader\r\n indent = \"\"\r\n\r\n # Adds the row in the resulting text\r\n self.text += \"<tr>\"\r\n for s in t:\r\n self.text += \"<td>\" + self.getHTMLText(s) + \"</td>\"\r\n self.text += \"</tr>\\n\"\r\n\r\n # Prints text on standard output using the table headers\r\n if self.verbosity >= 1 :\r\n for i in range(len(t)):\r\n print indent, headers[i], \" : \", t[i]\r\n print \"\"", "def expansion(self, block, table):\n return [block[x - 1] for x in table]", "def convertAllTabs(self: Self, event: Event = None) -> None:\n c = self\n u = c.undoer\n undoType = 'Convert All Tabs'\n current = c.p\n if g.app.batchMode:\n c.notValidInBatchMode(undoType)\n return\n theDict = c.scanAllDirectives(c.p)\n tabWidth = theDict.get(\"tabwidth\")\n count = 0\n u.beforeChangeGroup(current, undoType)\n for p in current.self_and_subtree():\n undoData = u.beforeChangeNodeContents(p)\n if p == current:\n changed = self.convertTabs(event)\n if changed:\n count += 1\n else:\n result = []\n changed = False\n text = p.v.b\n lines = text.split('\\n')\n for line in lines:\n i, w = g.skip_leading_ws_with_indent(line, 0, tabWidth)\n s = g.computeLeadingWhitespace(\n w, -abs(tabWidth)) + line[i:] # use negative width.\n if s != line:\n changed = True\n result.append(s)\n 
if changed:\n count += 1\n p.setDirty()\n p.setBodyString('\\n'.join(result))\n u.afterChangeNodeContents(p, undoType, undoData)\n u.afterChangeGroup(current, undoType)\n if not g.unitTesting:\n g.es(\"tabs converted to blanks in\", count, \"nodes\")", "def _parse_result_page(self, page):\n items = []\n table = list(page.findall(\".//table[@id='browse']\"))[0]\n for row in (x for x in list(table.findall('tr'))[1:]\n if len(x.getchildren()) != 1):\n item = self._parse_item_row(row)\n items.append(item)\n return items", "def _analyze_relationships(self):\n self._child_map = defaultdict(set)\n self._parent_map = defaultdict(set)\n\n for table, table_meta in self._metadata['tables'].items():\n if table_meta.get('use', True):\n for field_meta in table_meta['fields'].values():\n ref = field_meta.get('ref')\n if ref:\n parent = ref['table']\n self._child_map[parent].add(table)\n self._parent_map[table].add(parent)", "def __find_predecessors(\n storm_object_table, target_row, max_num_sec_id_changes,\n change_type_string, return_all_on_path):\n\n unique_times_unix_sec, orig_to_unique_indices = numpy.unique(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values,\n return_inverse=True\n )\n\n target_time_unix_sec = storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values[target_row]\n\n this_time_index = numpy.where(\n unique_times_unix_sec == target_time_unix_sec\n )[0][0]\n\n predecessor_rows = []\n rows_in_frontier = numpy.array([target_row], dtype=int)\n id_change_counts_in_frontier = numpy.array([0], dtype=int)\n\n while this_time_index >= 0:\n these_current_rows = numpy.where(\n orig_to_unique_indices == this_time_index\n )[0]\n\n old_rows_in_frontier = copy.deepcopy(rows_in_frontier)\n old_id_change_counts = copy.deepcopy(id_change_counts_in_frontier)\n\n rows_in_frontier = []\n id_change_counts_in_frontier = []\n\n for this_row, this_num_changes in zip(\n old_rows_in_frontier, old_id_change_counts\n ):\n if this_row not in these_current_rows:\n rows_in_frontier.append(this_row)\n id_change_counts_in_frontier.append(this_num_changes)\n continue\n\n these_previous_rows = __find_immediate_predecessors(\n storm_object_table=storm_object_table, target_row=this_row)\n\n these_change_flags = (\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values[\n these_previous_rows] !=\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values[\n this_row]\n )\n\n if change_type_string == MERGER_STRING:\n this_merger_flag = (\n storm_object_table[\n tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN\n ].values[this_row] != ''\n )\n\n these_change_flags = numpy.logical_and(\n these_change_flags, this_merger_flag)\n\n if change_type_string == SPLIT_STRING:\n these_split_flags = (\n storm_object_table[\n tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN\n ].values[these_previous_rows] != ''\n )\n\n these_change_flags = numpy.logical_and(\n these_change_flags, these_split_flags)\n\n these_previous_num_changes = (\n this_num_changes + these_change_flags.astype(int)\n )\n\n these_good_indices = numpy.where(\n these_previous_num_changes <= max_num_sec_id_changes\n )[0]\n\n these_previous_rows = these_previous_rows[these_good_indices]\n these_previous_num_changes = these_previous_num_changes[\n these_good_indices]\n\n add_this_row = (\n (len(these_previous_rows) == 0 and this_row != target_row) or\n return_all_on_path\n )\n\n if add_this_row:\n predecessor_rows.append(this_row)\n\n if len(these_previous_rows) != 0:\n rows_in_frontier += these_previous_rows.tolist()\n id_change_counts_in_frontier += 
(\n these_previous_num_changes.tolist()\n )\n\n this_time_index -= 1\n\n rows_in_frontier = numpy.array(rows_in_frontier, dtype=int)\n id_change_counts_in_frontier = numpy.array(\n id_change_counts_in_frontier, dtype=int)\n\n rows_in_frontier, these_unique_indices = numpy.unique(\n rows_in_frontier, return_index=True)\n id_change_counts_in_frontier = id_change_counts_in_frontier[\n these_unique_indices]\n\n return numpy.array(predecessor_rows + rows_in_frontier.tolist(), dtype=int)", "def _get_and_create_relation_objects(self, root_node):\n relations = []\n\n for relation in root_node.iterdescendants(\"TLINK\"):\n lid = relation.get(\"lid\")\n\n # Get relation type as a string\n relation_type = relation.get(\"relType\")\n\n # Get relation_type_id\n relation_type_id = RelationType.get_id(relation_type)\n\n if not relation.get(\"timeID\") and not relation.get(\"relatedToTime\"):\n # This is event-event\n source_eiid = relation.get(\"eventInstanceID\")\n target_eiid = relation.get(\"relatedToEventInstance\")\n\n # Find source event\n source_obj = self.find_event_by_eiid(self.events, source_eiid)\n # Find target event\n target_obj = self.find_event_by_eiid(self.events, target_eiid)\n\n else:\n # This must be event-timex or timex-event or timex-timex\n target_tid = relation.get(\"relatedToTime\")\n target_eiid = relation.get(\"relatedToEventInstance\")\n\n source_tid = relation.get(\"timeID\")\n source_eiid = relation.get(\"eventInstanceID\")\n\n\n if source_tid and target_eiid:\n # timex-event\n source_obj = self.find_timex_by_tid(source_tid)\n target_obj = self.find_event_by_eiid(self.events, target_eiid)\n elif source_eiid and target_tid:\n # event-timex\n source_obj = self.find_event_by_eiid(self.events, source_eiid)\n target_obj = self.find_timex_by_tid(target_tid)\n elif source_tid and target_tid:\n # timex-timex\n source_obj = self.find_timex_by_tid(source_tid)\n target_obj = self.find_timex_by_tid(target_tid)\n\n relation_obj = Relation(lid, self.text_obj, source_obj, target_obj, relation_type_id)\n\n # So we don't run into problems with helper.output\n if relation_obj.is_timex_timex(): relation_obj.predicted_class = relation_type_id\n\n # There are sometimes duplicates which we do not want to have\n if relation_obj not in relations:\n relations.append(relation_obj)\n\n return relations" ]
[ "0.5702697", "0.56278515", "0.55263174", "0.5384425", "0.5336966", "0.52965224", "0.52801716", "0.5213338", "0.51913345", "0.5113082", "0.5075567", "0.5038085", "0.50110316", "0.49885792", "0.49777687", "0.49660102", "0.49603015", "0.49517924", "0.4944479", "0.49234688", "0.49149168", "0.48444274", "0.4844128", "0.48233908", "0.48038", "0.48030627", "0.47778618", "0.4759175", "0.47583216", "0.474778", "0.47148475", "0.47103056", "0.47021845", "0.46813378", "0.4643223", "0.46351233", "0.46123296", "0.4612188", "0.46048096", "0.4604561", "0.4602734", "0.45985395", "0.45853394", "0.4584118", "0.45701978", "0.4569987", "0.45691764", "0.45634627", "0.45626736", "0.45512086", "0.4550535", "0.45455727", "0.45377305", "0.45259818", "0.45168903", "0.4510925", "0.45074698", "0.4488068", "0.44836235", "0.44674093", "0.44665003", "0.44645306", "0.44578174", "0.4454705", "0.44479173", "0.4443805", "0.4437169", "0.44228372", "0.44176716", "0.44112447", "0.44086868", "0.44027603", "0.43955994", "0.4370487", "0.43700212", "0.4358449", "0.43529072", "0.43518037", "0.4346487", "0.43450797", "0.43417788", "0.4333812", "0.4333556", "0.43307388", "0.43249527", "0.4322602", "0.43176287", "0.43175238", "0.431714", "0.4309432", "0.43082008", "0.4305113", "0.4304047", "0.42959058", "0.4293959", "0.42939192", "0.42925653", "0.42876264", "0.42814183", "0.42749542" ]
0.5284191
6
Wait on an IPython AsyncResult, printing progress to stdout. Based on wait_interactive() in IPython.parallel and the output of Joblib in verbose mode. This will work best when using a load-balanced view with a smallish chunksize.
def wait_progress(ar, interval=5, timeout=-1): if timeout is None: timeout = -1 N = len(ar) tic = time.time() print "\nRunning %i tasks:" % N sys.stdout.flush() last = 0 while not ar.ready() and (timeout < 0 or time.time() - tic <= timeout): ar.wait(interval) progress, elapsed = ar.progress, ar.elapsed if progress > last: last = progress remaining = elapsed * (float(N) / progress - 1.) print ' Done %4i out of %4i | elapsed: %s remaining: %s' % ( progress, N, short_format_time(elapsed), short_format_time(remaining)) sys.stdout.flush() if ar.ready(): try: speedup = round(100.0 * ar.serial_time / ar.wall_time) print "\nParallel speedup: %i%%" % speedup # For some reason ar.serial_time occasionally throws this exception. # We choose to ignore it and just not display the speedup factor. except TypeError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def print_progress(self):\n\n if not self.verbose:\n return\n\n elapsed_time = time.time() - self._start_time\n\n if self._is_completed():\n # Make sure that we get a last message telling us we are done\n self._print(\n f\"Done {self.n_completed_tasks:3d} out of \"\n f\"{self.n_completed_tasks:3d} | elapsed: \"\n f\"{short_format_time(elapsed_time)} finished\"\n )\n return\n\n # Original job iterator becomes None once it has been fully\n # consumed : at this point we know the total number of jobs and we are\n # able to display an estimation of the remaining time based on already\n # completed jobs. Otherwise, we simply display the number of completed\n # tasks.\n elif self._original_iterator is not None:\n if _verbosity_filter(self.n_dispatched_batches, self.verbose):\n return\n self._print(\n f\"Done {self.n_completed_tasks:3d} tasks | elapsed: \"\n f\"{short_format_time(elapsed_time)}\"\n )\n else:\n index = self.n_completed_tasks\n # We are finished dispatching\n total_tasks = self.n_dispatched_tasks\n # We always display the first loop\n if not index == 0:\n # Display depending on the number of remaining items\n # A message as soon as we finish dispatching, cursor is 0\n cursor = (total_tasks - index + 1 -\n self._pre_dispatch_amount)\n frequency = (total_tasks // self.verbose) + 1\n is_last_item = (index + 1 == total_tasks)\n if (is_last_item or cursor % frequency):\n return\n remaining_time = (elapsed_time / index) * \\\n (self.n_dispatched_tasks - index * 1.0)\n # only display status if remaining time is greater or equal to 0\n self._print(\n f\"Done {index:3d} out of {total_tasks:3d} | elapsed: \"\n f\"{short_format_time(elapsed_time)} remaining: \"\n f\"{short_format_time(remaining_time)}\"\n )", "def wait(self, timeout: float = None) -> CompletedProcess: # type: ignore\n if self.stdout is None:\n return CompletedProcess(self.args, returncode=super().wait(timeout=timeout), stdout=None)\n else:\n stdout = []\n while self.poll() is None:\n stdout.append(line := self.stdout.readline())\n\n if self.verbose:\n print(line, end=\"\")\n\n return CompletedProcess(self.args, returncode=self.poll(), stdout=\"\".join(stdout))", "def _print_progress(self):\n \n print 'Completed %d of %d' %(self.progress_id, self.total_work)\n self.progress_id += 1", "def query_job_progress():\n pass", "def wait(self, timeout=None, live_progress=False):\n\n live_progress = live_progress and sys.stdout.isatty()\n\n if live_progress:\n try:\n widgets = [\n Percentage(),\n ' ', AnimatedMarker(),\n ' ', Bar(),\n ' ', AdaptiveETA()\n ]\n progressbar = ProgressBar(widgets=widgets, max_value=100)\n except Exception:\n live_progress = False\n\n start = time.time()\n if self._uuid is None:\n self.update(True)\n return False\n\n nap = min(10, timeout) if timeout is not None else 10\n\n self.update(True)\n while self._state in RUNNING_DOWNLOADING_STATES:\n if live_progress:\n n = 0\n progress = 0\n while True:\n time.sleep(1)\n n += 1\n if n >= nap:\n break\n progress = self.status.execution_progress if self.status is not None else 0\n progress = max(0, min(progress, 100))\n progressbar.update(progress)\n else:\n time.sleep(nap)\n\n self.update(True)\n\n if timeout is not None:\n elapsed = time.time() - start\n if timeout <= elapsed:\n self.update()\n return False\n 
else:\n nap = min(10, timeout - elapsed)\n self.update(True)\n if live_progress:\n progressbar.finish()\n return True", "def print_progress(self, info_dict):\n if self.n_print != 0:\n t = info_dict['t']\n if t == 1 or t % self.n_print == 0:\n string = 'Iteration {0}'.format(str(t).rjust(len(str(self.n_iter))))\n string += ' [{0}%]'.format(str(int(t / self.n_iter * 100)).rjust(3))\n print(string)", "def call_progress_bar(result_parts, line_no):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n try:\n from tqdm.autonotebook import tqdm as tqdm_notebook\n except ImportError:\n raise ImportError(\"Please pip install tqdm to use the progress bar\")\n from IPython import get_ipython\n\n try:\n cell_no = get_ipython().execution_count\n # This happens if we are not in ipython or jupyter.\n # No progress bar is supported in that case.\n except AttributeError:\n return\n pbar_id = f\"{cell_no}-{line_no}\"\n futures = [\n block\n for row in result_parts\n for partition in row\n for block in partition.list_of_blocks\n ]\n bar_format = (\n \"{l_bar}{bar}{r_bar}\"\n if \"DEBUG_PROGRESS_BAR\" in os.environ\n and os.environ[\"DEBUG_PROGRESS_BAR\"] == \"True\"\n else \"{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}\"\n )\n bar_lock.acquire()\n if pbar_id in progress_bars:\n if hasattr(progress_bars[pbar_id], \"container\"):\n if hasattr(progress_bars[pbar_id].container.children[0], \"max\"):\n index = 0\n else:\n index = 1\n progress_bars[pbar_id].container.children[index].max = progress_bars[\n pbar_id\n ].container.children[index].max + len(futures)\n progress_bars[pbar_id].total = progress_bars[pbar_id].total + len(futures)\n progress_bars[pbar_id].refresh()\n else:\n progress_bars[pbar_id] = tqdm_notebook(\n total=len(futures),\n desc=\"Estimated completion of line \" + str(line_no),\n bar_format=bar_format,\n )\n bar_lock.release()\n\n threading.Thread(target=_show_time_updates, args=(progress_bars[pbar_id],)).start()\n\n modin_engine = Engine.get()\n engine_wrapper = None\n if modin_engine == \"Ray\":\n from modin.core.execution.ray.common.engine_wrapper import RayWrapper\n\n engine_wrapper = RayWrapper\n elif modin_engine == \"Unidist\":\n from modin.core.execution.unidist.common.engine_wrapper import UnidistWrapper\n\n engine_wrapper = UnidistWrapper\n else:\n raise NotImplementedError(\n f\"ProgressBar feature is not supported for {modin_engine} engine.\"\n )\n\n for i in range(1, len(futures) + 1):\n engine_wrapper.wait(futures, num_returns=i)\n progress_bars[pbar_id].update(1)\n progress_bars[pbar_id].refresh()\n if progress_bars[pbar_id].n == progress_bars[pbar_id].total:\n progress_bars[pbar_id].close()", "def tqdm_joblib(tqdm_object):\n\n def tqdm_print_progress(self):\n if self.n_completed_tasks > tqdm_object.n:\n n_completed = self.n_completed_tasks - tqdm_object.n\n tqdm_object.update(n=n_completed)\n\n original_print_progress = joblib.parallel.Parallel.print_progress\n joblib.parallel.Parallel.print_progress = tqdm_print_progress\n\n try:\n yield tqdm_object\n finally:\n joblib.parallel.Parallel.print_progress = original_print_progress\n tqdm_object.close()", "def printProgressBar(iteration, total, pbar=False, prefix = '', suffix = '', decimals = 1, length = 50, fill = 'X', verbose=False):\n\n from .module_exists import module_exists\n from .in_ipynb import in_ipynb\n\n if module_exists('tqdm'):\n if type(pbar) == bool:\n if in_ipynb():\n if verbose: print('- NOTEBOOK MODE -')\n from tqdm import tqdm_notebook as tqdm\n 
else:\n if verbose: print('- PYTHON/BASH MODE -')\n from tqdm import tqdm\n pbar = tqdm(total=total)\n pbar.update(iteration)\n else:\n pbar.update(iteration-pbar.last_print_n)\n if iteration == total: pbar.close()\n return pbar\n\n else:\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n #print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n print('\\r{0} |{1}| %{2} %{3}'.format(prefix, bar, percent, suffix))\n # Print New Line on Complete\n if iteration == total:\n print()", "def wait_for_processing(self, job_id: str, show_progress: bool = False) -> None:\n # How often to poll Harmony for updated information during job processing.\n check_interval = 3.0 # in seconds\n # How often to refresh the screen for progress updates and animating spinners.\n ui_update_interval = 0.33 # in seconds\n\n intervals = round(check_interval / ui_update_interval)\n if show_progress:\n with progressbar.ProgressBar(max_value=100, widgets=progressbar_widgets) as bar:\n progress = 0\n while progress < 100:\n progress, status = self.progress(job_id)\n if status == 'failed':\n raise Exception('Job has failed. Call result_json() to learn more.')\n break\n if status == 'canceled':\n print('Job has been canceled.')\n break\n # This gets around an issue with progressbar. If we update() with 0, the\n # output shows up as \"N/A\". If we update with, e.g. 0.1, it rounds down or\n # truncates to 0 but, importantly, actually displays that.\n if progress == 0:\n progress = 0.1\n\n for _ in range(intervals):\n bar.update(progress) # causes spinner to rotate even when no data change\n sys.stdout.flush() # ensures correct behavior in Jupyter notebooks\n if progress >= 100:\n break\n else:\n time.sleep(ui_update_interval)\n else:\n progress = 0\n while progress < 100:\n progress, status = self.progress(job_id)\n if status == 'failed':\n raise Exception('Job has failed. 
Call result_json() to learn more.')\n break\n if status == 'canceled':\n break\n time.sleep(check_interval)", "def print_progress(self, index):\r\n if not self.verbose:\r\n return\r\n elapsed_time = time.time() - self._start_time\r\n\r\n # This is heuristic code to print only 'verbose' times a messages\r\n # The challenge is that we may not know the queue length\r\n if self._original_iterable:\r\n if _verbosity_filter(index, self.verbose):\r\n return\r\n self._print('Done %3i jobs | elapsed: %s',\r\n (index + 1,\r\n short_format_time(elapsed_time),\r\n ))\r\n else:\r\n # We are finished dispatching\r\n queue_length = self.n_dispatched\r\n # We always display the first loop\r\n if not index == 0:\r\n # Display depending on the number of remaining items\r\n # A message as soon as we finish dispatching, cursor is 0\r\n cursor = (queue_length - index + 1\r\n - self._pre_dispatch_amount)\r\n frequency = (queue_length // self.verbose) + 1\r\n is_last_item = (index + 1 == queue_length)\r\n if (is_last_item or cursor % frequency):\r\n return\r\n remaining_time = (elapsed_time / (index + 1) *\r\n (self.n_dispatched - index - 1.))\r\n self._print('Done %3i out of %3i | elapsed: %s remaining: %s',\r\n (index + 1,\r\n queue_length,\r\n short_format_time(elapsed_time),\r\n short_format_time(remaining_time),\r\n ))", "def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')", "def make_show_progress():\n \n start_time = time.time()\n lines_read = 0\n\n def show_progress(chunk_length):\n \"\"\"Displays a progress line. Created by make_show_progress.\"\"\"\n \n nonlocal lines_read\n\n lines_read += chunk_length\n elapsed_time = int(time.time() - start_time)\n print('{:,} lines read | time {:,}s'.format(lines_read, elapsed_time))\n\n return show_progress", "def _step_waiting(self, silent=False):\n if not silent and self._waiting_steps == 0:\n sys.stdout.write(\"Waiting for resources \")\n self._waiting_steps += 1\n self._print_step_and_wait(self._waiting_steps, silent=silent)", "def wait_process_completion(remote_command_executor, pid):\n logging.info(\"Waiting for performance test to complete\")\n command = f\"\"\"\n ps --pid {pid} > /dev/null\n [ \"$?\" -ne 0 ] && echo \"COMPLETE\" || echo \"RUNNING\"\n \"\"\"\n result = remote_command_executor.run_remote_command(command)\n if result.stdout == \"RUNNING\":\n raise Exception(\"The process is still running\")\n else:\n return result.stdout.strip()", "def _printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '$'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n sys.stdout.write('\\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix))\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': 
total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def show_progress(chunk_length):\n \n nonlocal lines_read\n\n lines_read += chunk_length\n elapsed_time = int(time.time() - start_time)\n print('{:,} lines read | time {:,}s'.format(lines_read, elapsed_time))", "def log_progress(sequence, every=None, size=None, name='Items'):\n from ipywidgets import IntProgress, HTML, VBox\n from IPython.display import display\n\n is_iterator = False\n if size is None:\n try:\n size = len(sequence)\n except TypeError:\n is_iterator = True\n if size is not None:\n if every is None:\n if size <= 200:\n every = 1\n else:\n every = int(size / 200) # every 0.5%\n else:\n assert every is not None, 'sequence is iterator, set every'\n\n if is_iterator:\n progress = IntProgress(min=0, max=1, value=1)\n progress.bar_style = 
'info'\n else:\n progress = IntProgress(min=0, max=size, value=0)\n label = HTML()\n box = VBox(children=[label, progress])\n display(box)\n\n index = 0\n try:\n for index, record in enumerate(sequence, 1):\n if index == 1 or index % every == 0:\n if is_iterator:\n label.value = '{name}: {index} / ?'.format(\n name=name,\n index=index\n )\n else:\n progress.value = index\n label.value = '{name}: {index} / {size}'.format(\n name=name,\n index=index,\n size=size\n )\n yield record\n except:\n progress.bar_style = 'danger'\n raise\n else:\n progress.bar_style = 'success'\n progress.value = index\n label.value = \"{name}: {index}\".format(\n name=name,\n index=str(index or '?')\n )", "def wait_for_complete(self, max_wait_sec=60, print_waiting=False):\n\n sleep_time_sec = 10.0\n t0 = time.perf_counter()\n status = self.check_status()\n last_status = ''\n count = 0\n while status != 'COMPLETED':\n if print_waiting:\n if last_status != status:\n print(status)\n count = 0\n if count == 40:\n count = 0\n print()\n print('.', end='')\n elapsed = time.perf_counter() - t0\n if elapsed > max_wait_sec:\n raise RuntimeError(\n f'Computation has exceeded desired wait period of {max_wait_sec} sec.')\n last_status = status\n time.sleep(sleep_time_sec)\n status = self.check_status()", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def _monitor_progress(conf_list, event, return_dict, verbose, in_interactive, interval=0.1):\n toolbar_width = 40\n bar_symbol = '-'\n total_p = len(conf_list)\n current_p = len(return_dict)\n\n if verbose:\n while current_p < total_p:\n _update_progress(current_p / total_p, is_ipy=in_interactive)\n time.sleep(interval)\n current_p = len(return_dict)\n _update_progress(1, is_ipy=in_interactive)\n print('\\n')", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def _log_progress(self):\n self.num_of_requests_in_pipeline += 1\n if self.num_of_requests_in_pipeline % 20 == 0:\n print('-' * 200)\n print(f'DB PIPELINE: {self.num_of_requests_in_pipeline} items wenth though pipeline.')\n print('-' * 200)", "def download_progress_callback(block_num, block_size, expected_size):\n total_blocks = int(math.ceil(expected_size / block_size))\n progress_increment = int(math.ceil(total_blocks / 100))\n\n if block_num % progress_increment == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n if block_num * block_size >= expected_size:\n print(\"\")", "def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar 
+= \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def dl_progress(count, block_size, total_size):\n length = 50\n current_size = count * block_size\n done = current_size * length // total_size\n togo = length - done\n prog = \"[\" + done * \"=\" + togo * \"-\" + \"]\"\n sys.stdout.write(prog)\n if(current_size < total_size):\n sys.stdout.write(\"\\r\")\n else:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r{0} |{1}| {2}% {3}'.format(prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / 
float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()", "def eprint(eeobject, indent=2, notebook=False, async=False):\n\n import pprint\n pp = pprint.PrettyPrinter(indent=indent)\n\n def get_async(eeobject, result):\n obj = ee.deserializer.decode(eeobject)\n try:\n result['result'] = obj.getInfo()\n except:\n raise\n\n def get_async2(eeobject, result):\n info = eeobject.getInfo()\n result.append(info)\n\n try:\n if async:\n manager = multiprocessing.Manager()\n info = manager.list()\n proxy = ee.serializer.encode(eeobject)\n process = multiprocessing.Process(target=get_async2, args=(eeobject, info))\n process.start()\n # process.join()\n else:\n info = eeobject.getInfo()\n\n except Exception as e:\n print(str(e))\n info = eeobject\n\n if not notebook:\n if async:\n def finalwait():\n isinfo = len(info) > 0\n while not isinfo:\n isinfo = len(info) > 0\n pp.pprint(info[0])\n p = multiprocessing.Process(target=finalwait, args=())\n p.start()\n else:\n pp.pprint(info)\n else:\n from geetools.ui.ipytools import create_accordion\n from IPython.display import display\n output = create_accordion(info)\n display(output)", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()", "def report_learning_progress(istep, nsteps, VC_HC, GS_HC, norm, mrh, mhh,\n rop=None):\n\n rep = '{}%'.format(int(100*istep/nsteps)).rjust(4)\n l_vc_hc = analysis.VC_HC_norm(VC_HC, norm).mean()\n l_gs_hc = analysis.GS_HC_norm(GS_HC, norm).mean()\n prog = '{} | VC-HC: {:.3f}, GS-HC: {:.3f}'.format(rep, l_vc_hc, l_gs_hc)\n prog += ' | H real: {:.2f}, H HC: {:.2f}'.format(mrh, mhh)\n if rop is not None:\n prog += ' | ro pow: {:.1f}'.format(rop)\n print(prog)", "def 
test_printProgressBarReporting(self):\n # Use a short, known console width because this simple test doesn't\n # need to test the console padding.\n self.setKnownConsoleSize(10, 34)\n clock = self.client.reactor = Clock()\n wrapped = BytesIO(b\"x\")\n wrapped.name = b\"sample\"\n wrapper = cftp.FileWrapper(wrapped)\n wrapper.size = 1024 * 10\n startTime = clock.seconds()\n clock.advance(2.0)\n wrapper.total += 4096\n\n self.client._printProgressBar(wrapper, startTime)\n\n if _PY3:\n result = b\"\\rb'sample' 40% 4.0kB 2.0kBps 00:03 \"\n else:\n result = \"\\rsample 40% 4.0kB 2.0kBps 00:03 \"\n self.assertEqual(self.client.transport.value(), result)", "def progressbar(iterator, verbosity, length=None):\n\n if verbosity == logging.INFO:\n if not length:\n length = len(iterator)\n\n with click.progressbar(iterator, length=length) as _iterator:\n yield _iterator\n else:\n yield iterator", "def update_progress(job_title, progress):\n \n length = 20 # modify this to change the length\n block = int(round(length*progress))\n msg = \"\\r{0}: [{1}] {2}%\".format(job_title, \"#\"*block + \"-\"*(length-block), round(progress*100, 2))\n if progress >= 1: msg += \" DONE\\r\\n\"\n sys.stdout.write(msg)\n sys.stdout.flush()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()", "def printWaiting(self):\n\t\tfor wait in self.w:\n\t\t\tw_print=\"\"\n\t\t\tfor c in wait:\n\t\t\t\tif c:\n\t\t\t\t\tw_print += str(c[1])\n\t\t\t\telse:\n\t\t\t\t\tw_print += 'NO'\n\t\t\t\tw_print += \" \"\n\t\t\tprint w_print", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = 
int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()", "def wait_progress(self, silent=False):\n if self._is_started:\n jobstate = self._get_job_state()\n try:\n while all([jobstate.find(s) < 0 for s in self._end_states]):\n if any([jobstate.find(s) >= 0 for s in self._wait_states]):\n self._step_waiting(silent=silent)\n if any([jobstate.find(s) >= 0 for s in self._run_states]):\n if(self._waiting_steps > 0 and self._running_steps == 0):\n if not silent:\n sys.stdout.write(\"\\n\")\n self._step_running(silent=silent)\n jobstate = self._get_job_state()\n except KeyboardInterrupt:\n sys.stdout.write(\"Terminate job {0} \\n\".format(self._jobid))\n sys.stdout.flush()\n check_call((['scancel', str(self._jobid)]))\n time.sleep(1)\n jobstate = self._get_job_state()\n self._is_terminated = True\n\n if not silent:\n if(self._waiting_steps > 0 or self._running_steps > 0):\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"End batch job {0} Status: {1}\\n\".format(\n self._jobid, jobstate))\n sys.stdout.write(\"Slurm command was : \" + \" \".join(self.cmd) + \"\\n\")\n sys.stdout.flush()", "def printProgressBar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd, flush=True)\n # Print New Line on Complete\n if iteration == total: \n print()", "def runLongTask(self):\n for i in range(5):\n sleep(1)\n self.reportProgress(i + 1)", "def parallel_call(call, args, callback=None, workers=10, show_progress=None, progress_title='Progress'):\n from multiprocessing import Pool, TimeoutError\n\n signal.signal(signal.SIGTERM, lambda *args: sys.exit(1))\n pool = Pool(workers, lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))\n\n def to_tuple(a):\n return a if isinstance(a, (list, tuple, set)) else [a]\n\n try:\n async_results = [(arg, pool.apply_async(call, to_tuple(arg), callback=callback)) for arg in args]\n\n results = {}\n while len(results) != len(async_results):\n for arg, result in async_results:\n if arg not in results:\n try:\n # This allows processes to be interrupted by CTRL+C\n results[arg] = result.get(1)\n except TimeoutError:\n pass\n except Exception as e:\n results[arg] = str(e)\n\n if show_progress:\n if callable(show_progress):\n progress = show_progress(list(results.keys()), args)\n else:\n progress = '%.2f%% completed' % (len(results) * 100.0 / len(async_results))\n show_status('%s: %s' % (progress_title, progress))\n\n pool.close()\n pool.join()\n\n return results\n\n except KeyboardInterrupt:\n os.killpg(os.getpid(), signal.SIGTERM) # Kills any child processes from subprocesses.\n pool.terminate()\n pool.join()\n sys.exit()", "def 
printProgressBar (self,iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar(iteration, total, prefix='Progress: ', suffix='Complete',\n decimals=1, length=50, fill='█'):\n global start_time\n if iteration == 0:\n start_time = time.time()\n value = 100 * (iteration / float(total))\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(value)\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n elapsed_time = int(time.time() - start_time)\n m = str(elapsed_time // 60).zfill(2)\n s = str(elapsed_time % 60).zfill(2)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()", "def long_task(self):\n\tverb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n\tadjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n\tnoun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n\tmessage = ''\n\ttotal = random.randint(10, 50)\n\tfor i in range(total):\n\t\tif not message or random.random() < 0.25:\n\t\t\tmessage = '{0} {1} {2}...'.format(random.choice(verb),\n\t\t\t\t\t\t\t\t\t\t\t random.choice(adjective),\n\t\t\t\t\t\t\t\t\t\t\t random.choice(noun))\n\t\tself.update_state(state='PROGRESS',\n\t\t\t\t\t\t meta={'current': i, 'total': total,\n\t\t\t\t\t\t\t\t'status': message})\n\treturn {'current': 100, 'total': 100, 'status': 'Task completed!',\n\t\t\t'result': 42}", "def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 *\n (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()", "def printProgressBar (iteration, total, prefix = '\\tProgress', suffix = 'Complete', decimals = 2, length = 30, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()", "def next(self):\n print(f\" {colored('[', 'yellow')}{bold(self.progress[self.pos])}{colored(']', 'yellow')} \"\n f\"{bold('Processing, please wait...')}\",\n end=\"\\r\",\n flush=True\n )\n self.increment()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '*'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = 
'█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '')\n # Print New Line on Complete\n if iteration == total:\n print()", "def check_progress_logs(self, job_id: str, sleep_time: int = 45) -> Dict:\n sys.stdout.write(\n \"\"\"\n @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@.####.@@(.@@/((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@@.####.&(@.,((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@@@*####....((((.@@@@@@@@@@..*@@@@@@@.@@@@@@@@/.@@@@@@@@..@@@@@..@@@@@@..........@@@@@@..@@@@@@@@@@@@@@@@@.../@@@@@@@\n @@@@@@@(###.%@.(((.@@@@@@@@@@@.@*.(@@@@@.@@@@@@@@/.@@@@@@@@@@..@..@@@@@@@@@@@@..@@@@@@@@@@..@@@@@@@@@@@@@@@@..@@.,@@@@@@\n @@@@@@@@/#.....,(.@@@@@@@@@@@@.@@@,./@@@.@@@@@@@@/.@@@@@@@@@@@...@@@@@@@@@@@@@..@@@@@@@@@@..@@@@@@@@@@@@@@@.*@@@@..@@@@@\n @@@@@@@(###(.%*(((.@@@@@@@@@@@.@@@@@/.(@.@@@@@@@@/.@@@@@@@@@*.@@&.,@@@@@@@@@@@..@@@@@@@@@@..@@@@@@@@@@@@@@..........@@@@\n @@@@@@,####....((((.@@@@@@@@@@.@@@@@@@...@@@@@@@@/.@@@@@@@(./@@@@@..&@@@@@@@@@..@@@@@@@@@@......@@@@@@@@/.@@@@@@@@@,.@@@\n @@@@@.####.@,&.,((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@.####.@@@*@@/((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n Have some coffee while you wait...\n ( (\n ) )\n ........\n | |]\n \\ /\n `----'\n \"\"\"\n )\n sys.stdout.write(f'\\nSetting up Nixtla infrastructure (this will take up 5 mins)...\\n')\n\n idx_logs = 0\n in_progress = True\n while in_progress:\n resp = self.get_status(job_id)\n status = resp['status']\n logs = json.loads(resp['logs'])\n\n if status != 'InProgress' and not logs:\n time.sleep(30)\n resp = self.get_status(job_id)\n status = resp['status']\n logs = json.loads(resp['logs'])\n\n if logs:\n #if logs != latest_logs:\n for log in logs[idx_logs:]:\n sys.stdout.write(f'{log}\\n')\n #latest_logs = logs\n idx_logs = len(logs)\n\n in_progress = status == 'InProgress'\n\n time.sleep(sleep_time)\n\n return status", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()", "def show_progress(show, current, max, text, *args):\n if show:\n progress = round((float(current) / max) * 100.0, 0)\n output = \"\\r\" + text.format(*args) + \" {0}% done. 
\".format(progress) \n sys.stdout.write(output)\n sys.stdout.flush()", "def get_progress(count, block_size, total_size) -> None:\r\n percent = int(count * block_size * 100 / total_size)\r\n print(f\"Downloading clip... {percent}%\", end=\"\\r\", flush=True)", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '.' * (length - filledLength)\r\n print('\\r %s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\r\n # Print New Line on Complete\r\n if iteration == total:\r\n print()\r\n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def progress(self, arg, num_done, info=''):\n pass", "def log_result(t, io_lock=None):\n\n # there is no lock in single-job run, which includes\n # running test/runtest tests from multi-job run, so check.\n if io_lock:\n io_lock.acquire()\n try:\n if suppress_output or catch_output:\n sys.stdout.write(t.headline)\n if not suppress_output:\n if t.stdout:\n print(t.stdout)\n if t.stderr:\n print(t.stderr)\n print_time(\"Test execution time: %.1f seconds\\n\", t.test_time)\n finally:\n if io_lock:\n io_lock.release()\n\n if quit_on_failure and t.status == 1:\n print(\"Exiting due to error\")\n print(t.status)\n sys.exit(1)", "def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd, flush=True)\n # Print New Line on Complete\n if iteration == total:\n print(flush=True)", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s\\r' % (prefix, bar, 
percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def await_simulation(self):\n\n # Test if overall statistics can be requested\n while True:\n d = self.BIVAS_API.get_output_overallstatistics(self.scenarioID)\n\n if (d is not None) and (d.status_code == 200):\n break\n\n logger.info('Waiting for BIVAS to finish...')\n time.sleep(60)\n\n logger.info(d.text)\n\n logger.info('Finished!')\n\n # Close BIVAS\n logger.info('Closing BIVAS')\n os.system('taskkill /f /im Bivas.exe')\n time.sleep(5)", "def wait():\n pass", "def PrintSpinner(self, stream=sys.stderr):\n self.UpdateSpinner()\n if not self.quiet_mode:\n stream.write(self.GetSpinner() + '\\r')", "def _print_progress(counter):\n\tif(slogviz.config.interactive):\n\t\tprint('parse log file entry nr: {}'.format(counter),end='\\r')", "def _iteration_changed(self):\n if self.showProgressBar:\n try:\n self._setProgress()\n except:\n # may fail when switching from training to inference\n from dbgp.client import brk; brk(port=9011)\n pass", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' % (\n self.nodes_examined, self.unique_nodes\n )", "def print_progressbar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(prefix, bar, percent, suffix)\n print('{} |{}| {} {}'.format(prefix, bar, percent, suffix), end=printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()", "def dl_progress(count, block_size, total_size):\n percent = int(count*block_size*100/total_size)\n sys.stdout.write(\"\\r\" + 'Progress:' + \"...%d%%\" % percent)\n sys.stdout.flush()", "def progressBar(iterable, prefix = 'Progress:', suffix = 'Complete', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n\n #-- Progress Bar Printing Function\n def printProgressBar (iteration):\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {iteration:,} of {total:,} ({100 * (iteration / float(total)):.1f}%) {suffix}', end = printEnd)\n\n #-- Initial Call\n printProgressBar(0)\n\n #-- Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n\n #--- Print New Line on Complete\n print()", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)", "def print_progress(iteration, total):\n iteration += 1\n prefix = 'Progress'\n suffix = 'Complete'\n length = 50\n fill = u\"\\u2588\"\n fill_alt = '#'\n\n percent = (\"{0:.1f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n bar_alt = fill_alt * filledLength + '-' * (length - filledLength)\n\n try:\n sys.stdout.write('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix))\n except:\n sys.stdout.write('\\r%s |%s| %s%% %s' % 
(prefix, bar_alt, percent, suffix))\n sys.stdout.flush()\n\n # Print New Line on Complete\n if iteration == total:\n print()", "def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n if total == 0:\n \treturn\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def l_print(*args):\n for rank in range(0, comm.size):\n comm.Barrier()\n if rank == comm.rank:\n l_print_no_barrier(*args)\n comm.Barrier()", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.full_neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def waitStatus(j, wtype='Load'):\n timeout = 1\n curIter = 0\n maxIter = 60\n done = False\n while not done:\n stat = j.GetStatus(wtype)\n if stat == \"complete\":\n done = True\n else:\n curIter = curIter + 1\n if curIter > maxIter:\n raise ValueError(\"timeout waiting\")\n time.sleep(timeout)", "def wait_until_job_completes(self):\n while True:\n jobflow = self.conn.describe_jobflow(self.jobid)\n if self.verbose_mode:\n print jobflow.state\n if (jobflow.state == 'COMPLETED' or jobflow.state == 'TERMINATED'\n or jobflow.state == 'FAILED'):\n break\n sleep(10)", "def printProgressBar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '|'):\n\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n\tfilledLength = int(length * iteration // total)\n\tbar = fill * filledLength + '-' * (length - filledLength)\n\tprint('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n\t# Print New Line on Complete\n\tif iteration == total:\n\t\tprint()", "def status(self, loop=False, delay=10):\n if not self._submit:\n raise Exception('The workfow has not started its execution yet.\\n'\n 'Please, check if the workflow is planned and submitted for execution.')\n seq = False\n\n while True:\n out, err = subprocess.Popen('pegasus-status -l %s' % self.submit_dir, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True, cwd=self.base_dir).communicate()\n if err:\n raise Exception(err)\n\n for line in out.decode('utf8').split('\\n'):\n if 'UNRDY' in line:\n seq = True\n elif seq:\n seq = False\n v = line.split()\n\n state = v[8]\n if state == 'Success':\n state = '\\x1b[1;32m' + state + '\\x1b[0m'\n elif state == 'Failure':\n state = '\\x1b[1;31m' + state + '\\x1b[0m'\n\n progress = '\\x1b[1;34m' + 'Progress: ' + v[7] + '%\\x1b[0m (' + state + ')'\n completed = '\\x1b[1;32mCompleted: ' + v[5] + '\\x1b[0m'\n queued = '\\x1b[1;33mQueued: ' + v[1] + '\\x1b[0m'\n running = '\\x1b[1;36mRunning: ' + v[3] + '\\x1b[0m'\n fail = '\\x1b[1;31mFailed: ' + v[6] + '\\x1b[0m'\n\n st = progress + '\\t(' + completed + ', ' + queued + ', ' + running + ', ' + fail + ')'\n print('%s\\r' % st, end='')\n break\n\n if not loop or 'Success' in out.decode('utf8') or 'Failure' in out.decode('utf8'):\n break\n time.sleep(delay)" ]
[ "0.59242386", "0.59242386", "0.58943117", "0.567091", "0.5660828", "0.5650391", "0.5599117", "0.5516994", "0.55079556", "0.5468367", "0.54247904", "0.5390598", "0.53793055", "0.530005", "0.5276069", "0.525269", "0.525216", "0.5234912", "0.52020633", "0.5197482", "0.5197482", "0.5197482", "0.5197482", "0.5197482", "0.5196685", "0.5143726", "0.5137677", "0.51254624", "0.5114735", "0.5113474", "0.51075906", "0.50860506", "0.50823283", "0.5072386", "0.5061213", "0.5057108", "0.50565886", "0.50565886", "0.50565886", "0.50565886", "0.50565886", "0.50543207", "0.505211", "0.5048728", "0.5047189", "0.5046756", "0.5044626", "0.50387275", "0.5028296", "0.502516", "0.5019354", "0.50191206", "0.50191206", "0.50191206", "0.50191206", "0.500807", "0.50052345", "0.50010365", "0.50002724", "0.49993995", "0.49992695", "0.49968907", "0.49952573", "0.49950367", "0.4990051", "0.49842328", "0.49778613", "0.4977431", "0.49732587", "0.49702916", "0.49702916", "0.49659723", "0.49641034", "0.4962151", "0.4955203", "0.4955203", "0.4955203", "0.4954234", "0.49541754", "0.49528515", "0.49524486", "0.4948709", "0.4947579", "0.4944584", "0.49380603", "0.493734", "0.49368197", "0.49309048", "0.49299526", "0.49294895", "0.49280503", "0.4927808", "0.49213973", "0.49202955", "0.49188232", "0.4916952", "0.49108896", "0.4898564", "0.48965263", "0.48950723" ]
0.5838231
3
Creates and saves a User with the given email and password.
def create_user(self, email, password=None): if not email: raise ValueError('Users must have an email address') user = self.model( email=self.normalize_email(email), ) user.set_password(password) user.save(using=self._db) return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n print(\"create user\")\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError(\"Vous devez renseigner un email!\")\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Please provide your email address'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password='test', **kwargs):\n user = get_user_model().objects.create(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, first_name, last_name, password, **extra_fields):\n if not email:\n raise ValueError(_('Email Address is required'))\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = 
self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n 
return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def signup(cls, username, first_name, last_name, email, password):\n\n hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')\n\n user = User(\n username=username,\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=hashed_pwd,\n )\n\n db.session.add(user)\n return user", "def create_user(self, email, password=None, **extra_fields):\n\n if not email:\n raise ValueError('El usuario debe proporcionar un email')\n\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self,email,password=None,**extra_fields):\n if not email:\n raise ValueError(\"Please provide an email\")\n user = self.model(email=self.normalize_email(email),**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password,username, **extra_fields):\r\n if not email:\r\n raise ValueError('The given email must be set')\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n email = self.normalize_email(email)\r\n user = self.model(email=email,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def _create_user(self, email: str, password: str, **extra_fields) -> 'User':\n if not email:\n raise ValueError(\"The given email must be set.\")\n email = self.normalize_email(email).lower()\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, email, password, **extra_fields):\n validate_email(email)\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email: str, password: str, **extra):\n try:\n user = self.model(email=self.normalize_email(email),\n **extra)\n user.set_password(password)\n user.save(using=self._db)\n except IntegrityError as Ex:\n raise IntegrityError(\"Duplicate\")\n return user", "def _create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, 
password, **extra_fields):\n if not email:\n \traise ValueError('Must provide a valid email address')\n\n now = timezone.now()\n user = self.model(\n email=self.normalize_email(email),\n date_joined=now,\n last_login=now,\n **extra_fields\n ) \n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, username, firstname, lastname, password, **other_fields):\n\n if not email:\n raise ValueError(_('You must provide an email address'))\n\n email = self.normalize_email(email)\n user = self.model(email=email, username=username, firstname=firstname, lastname=lastname, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email=None, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, username=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, **kwargs):\n if not email:\n raise ValueError('User must have email address')\n if not password:\n raise ValueError('User must have password')\n email = self.normalize_email(email)\n user = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('User must have an email address')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n\n user.set_password(password) # Ensure password being encrypted\n user.save(using=self._db) # Save objects in django\n\n return user", "def create_user(self, email: str, password: str, **kwargs: str) -> \"User\":\n email = self.normalize_email(email)\n user: \"User\" = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not email:\n raise ValueError('The email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, username, **extra_fields):\n if not email:\n raise ValueError(_('Email is required.'))\n if not username:\n raise ValueError(_('Username is required.'))\n email = 
self.normalize_email(email)\n username = username\n user = self.model(email=email, username=username, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Please enter a valid email address')\n\n user = self.model(email=email.lower(), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self,email,password,**extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\n\t\ttry:\n\t\t\twith transaction.atomic():\n\t\t\t\tuser = self.model(email=email,**extra_fields)\n\t\t\t\tuser.set_password(password)\n\t\t\t\tuser.save(using=self._db)\n\t\t\t\treturn user\n\t\texcept:\n\t\t\traise", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('Username is required.')\n if not email:\n raise ValueError('Email is required.')\n if not password:\n raise ValueError('Password is required.')\n try:\n with transaction.atomic():\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user(self, email, username, first_name, last_name, password):\n\n email = self.normalize_email(email)\n\n user = self.model(\n email=email,\n username=username,\n first_name=first_name,\n last_name=last_name\n )\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extrac_fields):\n\n if not email:\n raise ValueError(\"User must have email\")\n\n email = self.normalize_email(email)\n\n user = self.model(email=email, **extrac_fields)\n\n user.set_password(password)\n\n user.save(using=self._db)\n\n return user", "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n last_login=now,\n date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n # Rasie an error if the email is empty\n if not email:\n raise ValueError('User must have an email address')\n # Make the email to be lower case for every new user\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, **fields):\n email = fields.pop('email')\n password = fields.get('password1')\n if not email:\n raise ValueError(\"Email address is required\")\n email = self.normalize_email(email)\n user = self.model(email=email, **fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password, **other_fields):\n if not username or not email:\n raise ValueError(_('The email and username must be set.'))\n email = self.normalize_email(email)\n\n user = self.model(username=username, email=email, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, 
email, name, password):\n\n if not email:\n raise ValueError(\"User must have an email address\")\n email = self.normalize_email(email)\n user = self.model(email=email)\n user.set_password(password)##encripts the password into HASH\n user.save(using=self._db)\n\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n first_name = first_name\n last_name = self.last_name\n user = self.model(first_name, last_name,email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password):\n\n user = User(email=email, password=password)\n \n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n\t\tif not email:\n\t\t\traise ValueError(\"Users must have an email address.\")\n\t\tuser = self.model(\n\t\t\temail = self.normalize_email(email)\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, username, name,\n email, password, **extra_fields):\n if not email:\n raise ValueError('Email field is required')\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n name=name,\n email=email,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def Create_user(self, email, name, password):\n\n #validating user inputs\n if not email:\n raise ValueError('Users must have email address')\n \n #normalize email (converting all to lowercase)\n email = self.normalize_email(email)\n #create a new user object\n user = self.model(email= email, name=name)\n\n #setting the password\n user.set_password(password)\n user.save(using = self._db) #using the same model created for the profile\n\n return user", "def register(cls, username, email, password):\n\n hashed_password = bcrypt.generate_password_hash(password).decode(\"UTF-8\")\n user = User(username=username, email=email, password=hashed_password)\n db.session.add(user)\n\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n extra_fields.setdefault('is_active', True)\n user.save()\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n is_activated=False,\n last_login=now,\n date_joined=now,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, 
password=None):\n if not email:\n raise ValueError('Users Must Have an email address')\n user = self.model(\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password):\n if not email:\n raise ValueError('Users must have an email address')\n if not password:\n raise ValueError('Password is required')\n\n user = self.model(\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, first_name, last_name, password=None):\n if not email:\n raise ValueError(_('Users must have an email address'))\n\n user = self.model(\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n\n if not email:\n raise ValueError(\"Users must have an email address\")\n\n user = self.model(\n email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n try:\n with transaction.atomic():\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.generate_activation_code()\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user(self, username, email, password=None,commit=True):\n\n\n user = self.model(\n email=self.normalize_email(email),\n username = username\n )\n\n user.set_password(password)\n if commit:\n user.save(using=self._db)\n\n return user", "def create_user(self, email, first_name, last_name=None, password=None):\n if not email:\n raise ValueError('User must have an email-address')\n\n email = self.normalize_email(email)\n user = self.model(email=email, first_name=first_name, last_name=last_name)\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password):\n\t\tnow = datetime.now()\n\t\tif username is None:\n\t\t\traise ValueError('Must include username')\n\t\tif email is None:\n\t\t\traise ValueError('Must include email')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(\n\t\t\temail=self.normalize_email(email),\n\t\t\tusername=username,\n\t\t\tdate_joined=now\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(email='user@example.com', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def _create_user(self, 
email, name, password, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n\n email = self.normalize_email(email)\n user = self.model(email=email, name=name, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n user.set_permissions(extra_fields.get('role'))\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError(\"Users must have an email address\")\n\n user = self.model(\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = CBUserManager.normalize_email(email)\n user = self.model(email=email,\n is_staff=False, is_active=True, is_superuser=False,\n last_login=now, date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = CBUserManager.normalize_email(email)\n user = self.model(email=email,\n is_staff=False, is_active=True, is_superuser=False,\n last_login=now, date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, first_name, last_name, password=None):\n if not email:\n raise ValueError('User must have an email address')\n\n # normalizing email for standarization\n email = self.normalize_email(email) \n # creating user model that user manager is representing\n user = self.model(email=email, first_name=first_name, last_name=last_name)\n # Encrypting password using method of AbstractBaseUserClass\n user.set_password(password)\n # self._db to save to any database \n user.save(using=self._db)\n\n return user", "def create_user(email, password):\n try:\n User(email=email, password=password)\n except IntegrityError:\n print('Error: Duplicate email address')", "def create_user(self, email, name, password=None):\n try:\n email = self.normalize_email(email)\n user = self.model(email=email, name=name)\n user.set_password(password)\n # This saves the password as hash object\n user.save(using=self._db)\n # Since there can be many dbs in our app, the\n # best practice is to save the user in current db.\n return user\n except Exception as e:\n raise", "def create_user(self,email,password=None, **extra_fields):\n\n if not email: \n raise ValueError('Users must have an email address')\n #sets the email field of your user model, this is done on the model itself because there are no functions to change it.\n user = self.model(email=self.normalize_email(email), **extra_fields) \n user.set_password(password)\n user.save(using=self._db) #save using the defualt database in the settings.py file.\n\n return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n if not username:\n raise ValueError('Users must have a username')\n \n user = self.model(email = self.normalize_email(email),\n username = username)\n \n user.set_password(password)\n user.save(using=self._db)\n 
return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('The given email must be set')\n if not username:\n raise ValueError('The given username must be set')\n \n user = self.model(email=self.normalize_email(email), username=username)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None):\n\n if not username:\n raise ValueError('Users must have an username')\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create(cls, name, username, email, password):\n new_user = cls(name=name,\n username=username,\n email=email\n )\n new_user.password = bcrypt.generate_password_hash(\n password).decode('utf-8')\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def create_user(self, email=None, name=None, password=None, phone=None):\n # if not email:\n # raise ValueError('Users must have an email address')\n\n user = self.model(\n email=email,\n name=name,\n phone=phone\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_user(self, name, email, password):\n\t\tif not name:\n\t\t\traise ValueError('You forgot to enter a name!')\n\t\tif not email:\n\t\t\traise ValueError('You forgot to enter an email address!')\n\t\tif not password:\n\t\t\traise ValueError('You forgot to enter a password!')\n\t\ttry:\n\t\t\tvalidate_email(email)\n\t\texcept ValidationError:\n\t\t\traise ValueError('The email address entered is invalid.')\n\t\tif User.objects.filter(email=email).count() > 0:\n\t\t\traise ValueError('The email address entered is already registered.')\n\t\tif len(password) < 8:\n\t\t\traise ValueError('The password must be at least 8 characters.')\n\t\t\n\t\tuser = self.model(\n\t\t\tname=name,\n\t\t\temail=self.normalize_email(email),\n\t\t)\n\t\t\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user" ]
[ "0.8417866", "0.84094507", "0.8398238", "0.83918047", "0.83918047", "0.83918047", "0.8383299", "0.8381542", "0.83808947", "0.8380314", "0.8379753", "0.83765846", "0.83742833", "0.83648807", "0.83648807", "0.83648807", "0.8361933", "0.8361933", "0.8348754", "0.8347133", "0.8347133", "0.8347133", "0.8347133", "0.8347133", "0.8347133", "0.8347133", "0.8347133", "0.83351076", "0.8326639", "0.83234733", "0.8319962", "0.8319741", "0.8313164", "0.8312642", "0.83102393", "0.8305448", "0.8303703", "0.8296919", "0.8283267", "0.8280729", "0.82755923", "0.8268521", "0.8258341", "0.8247434", "0.82324255", "0.82324255", "0.82324255", "0.82306826", "0.8225247", "0.82251865", "0.82232046", "0.82198167", "0.8215152", "0.82144284", "0.82140577", "0.8198058", "0.81846225", "0.81832135", "0.8176645", "0.8174298", "0.8169785", "0.8151005", "0.81402177", "0.81353855", "0.813233", "0.8130226", "0.8128868", "0.8119603", "0.8115544", "0.811359", "0.8101558", "0.80824125", "0.8081444", "0.8080872", "0.8077403", "0.8072611", "0.8055179", "0.8051275", "0.80457103", "0.8031804", "0.80178744", "0.801604", "0.80152196", "0.8013435", "0.7990996", "0.79864603", "0.79824084", "0.79824084", "0.79795563", "0.7979219", "0.79735535", "0.79711443", "0.7969021", "0.79659563", "0.796333", "0.7958423", "0.79568005", "0.79546314", "0.79519683", "0.7943896" ]
0.79938453
84
Creates and saves a superuser with the given email and password.
def create_superuser(self, email, password):
    user = self.create_user(email, password=password)
    user.is_admin = True
    user.save(using=self._db)
    return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createsuperuser():\n\n email = prompt('User E-Mail')\n email_confirm = prompt('Confirm E-Mail')\n\n if not email == email_confirm:\n sys.exit('\\nCould not create user: E-Mail did not match')\n\n if not EMAIL_REGEX.match(email):\n sys.exit('\\nCould not create user: Invalid E-Mail addresss')\n\n password = prompt_pass('User password')\n password_confirm = prompt_pass('Confirmed password')\n\n if not password == password_confirm:\n sys.exit('\\nCould not create user: Passwords did not match')\n\n datastore = SQLAlchemyUserDatastore(db, User, Role)\n datastore.create_user(\n email=email,\n password=encrypt_password(password),\n active=True,\n super_user=True)\n\n db.session.commit()", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password):\n if password is None:\n raise TypeError('Superusers must have a password.')\n user = self.create_user(email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n return user", "def create_superuser(self, email, password, **extra_fields):\n return self.create_user(email, password, is_staff=True,\n is_superuser=True, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n return self._create_user(email, password, True, True, is_active=True,\n **extra_fields)", "def create_superuser(self, email, password):\n if password is None:\n raise TypeError('Superusers must have a password')\n user = self.create_user(email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault(\"is_staff\", True)\n extra_fields.setdefault(\"is_superuser\", True)\n extra_fields.setdefault(\"is_active\", True)\n\n if extra_fields.get(\"is_staff\") is not True:\n raise ValueError(_(\"Superuser must have is_staff=True.\"))\n if extra_fields.get(\"is_superuser\") is not True:\n raise ValueError(_(\"Superuser must have is_superuser=True.\"))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n\n if extra_fields.get('is_superuser') is False:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n\t\textra_fields.setdefault('is_staff', True)\n\t\textra_fields.setdefault('is_superuser', True)\n\n\t\tif extra_fields.get('is_staff') is not True:\n\t\t\traise ValueError('Superuser must have is_staff=True.')\n\t\tif extra_fields.get('is_superuser') is not True:\n\t\t\traise ValueError('Superuser must have is_superuser=True.')\n\n\t\treturn self._create_user(email, password, **extra_fields)", "def create_superuser(self, username, email, password=None):\n if password is None:\n raise 
ValueError('Password should not be none')\n\n # Creating user instance and saving it to database\n user = self.create_user(username, email, password)\n\n # Assigning current user as superuser\n user.is_superuser = True\n user.is_staff = True\n\n # Saving the modified data to the database\n user.save()\n\n return user", "def create_superuser(self, email, password, **extra_fields):\n\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password):\n\t\tuser = self.create_user(email, password)\n\t\tuser.is_admin = True\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_superuser(self, email, password=None, **extra_fields):\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True;'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password):\n\t\tuser = self.create_user(\n\t\t email,\n\t\t password=password,\n\t\t)\n\t\tuser.is_admin = True\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_superuser(self, 
email, password, **extrac_fields):\n\n user = self.create_user(email, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault(\"is_staff\", True)\n extra_fields.setdefault(\"is_superuser\", True)\n\n if extra_fields.get(\"is_staff\") is not True:\n raise ValueError(\"Superuser must have is_staff=True.\")\n if extra_fields.get(\"is_superuser\") is not True:\n raise ValueError(\"Superuser must have is_superuser=True.\")\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **kwargs):\n user = self.create_user(email, password, **kwargs)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password, **kwargs):\n return self.create_account(email=email, password=password, is_staff=True,\n is_superuser=True, **kwargs)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_manager', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_manager') is 
not True:\n raise ValueError(_('Superuser must have is_manager=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, name, password):\n\n user = self.create_user(email, name, password)\n\n # Make this user an admin.\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password):\n\n user = self.create_user(email, password=password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password):\n user = self.create_user(\n email,\n password=password,\n )\n user.is_admin = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, email, password):\n print(\"creating super user....\")\n user = self.create_user(\n\n username=username,\n password=password,\n email = email,\n commit=False,\n )\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password):\n if password is None:\n raise TypeError('Superusers must have a password.')\n\n user = self.create_user(username, email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n\n user.is_superuser = True # 'is_superuser' is created automatically\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password):\n\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, first_name='', last_name='', password=None, **extra_fields):\n return self._create_user(email, password, first_name, last_name, is_staff=True, is_superuser=True,\n **extra_fields)", "def create_superuser(self,email,name,password):\n\n user = self.create_user(email, name, password)\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n return user", "def _create_superuser(username, email, password):\n if username and email and password:\n user, created = User.objects.get_or_create(pk=defaults.USERWARE_SUPERUSER_ID)\n if user:\n user.username = username\n user.email = email\n user.set_password(password)\n user.is_staff = True\n user.is_active = True\n user.is_superuser = True\n user.save()\n action = \"Created\" if created else \"Updated\"\n print >> sys.stderr, \"{} Superuser: [username={}, email={}, id={}]\".format(action, username, email, user.id)", "def create_superuser(self, email, username, first_name, last_name, password):\n\n user = self.create_user(\n email,\n username,\n first_name,\n last_name,\n password\n )\n\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password):\n if password 
is None:\n raise TypeError('Superusers must have a password.')\n\n user = self.model(email=email)\n user.set_password(password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password=None):\n user = self.create_user(\n email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password):\n if password is None:\n raise TypeError('Superusers must have a password.')\n\n id_number = self.create_id_number()\n user = self.create_user(\n username=username,\n email=email,\n password=password,\n id_number=id_number\n )\n user.is_superuser = True\n user.is_active = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password):\n # Create a new user using create_user\n user = self.create_user(email, password)\n user.is_staff = True\n # Make the user to be a superuser\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, password, email=None):\n user = self.create_user(username, password)\n user.is_staff = True\n user.is_superuser = True\n user.save()\n\n return user", "def create_superuser(self, *args, **kwargs):\n password = kwargs.pop('password', '')\n email = kwargs.pop('email', '')\n user = self.model(email=self.normalize_email(email), **kwargs)\n user.set_password(password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password, **extra_fields):\n user = self.model(\n email = email,\n **extra_fields \n )\n user.set_password(password)\n user.is_admin =True\n user.is_superuser=True\n user.is_staff=True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, first_name, last_name, password):\n user = self.create_user(email, password=password, first_name=first_name, last_name=last_name)\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password=None):\n user = self.create_user(email)\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password=password)\n user.is_admin = True\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password=None):\n user = self.create_user(email, password=password)\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email,\n password=password)\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n # Create a normal user first, then change it to the super user\n user = self.create_user(email, name, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def 
create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self,email,password):\n user = self.create_user(email,password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, first_name, last_name, password):\n user = self.create_user(email, first_name, last_name, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.admin = True\n user.staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password=None):\n\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n\n user = self.create_user(email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, full_name, password=None):\n print(\"Is this the method being called\")\n user = self.create_user(\n email,\n full_name,\n password=password,\n )\n user.staff = True\n user.admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n user = self.create_user(email, name, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, firstname, lastname):\n user = self.create_user(\n firstname,\n lastname,\n email,\n '',\n password=password,\n )\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, username, password):\n user = self.create_user(email, password=password,\n username=username )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, firstname, lastname, email, password):\n user = self.create_user(\n username=username,\n firstname=firstname,\n lastname=lastname,\n email=email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password=None):\n user = self.create_user(email, password)\n user.is_active = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password):\n\t\tuser = self._create_user(username, email, password)\n\t\tuser.is_admin = True\n\t\tuser.is_author = True\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_superuser(self, email, name, password, **extra_fields):\n extra_fields.setdefault('role', User.ROLE_ADMIN)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_staff', True)\n\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, name, password, **extra_fields)", "def create_superuser(self, email, name, password):\r\n user = self.create_user(\r\n email,\r\n password=password,\r\n 
name=name,\r\n )\r\n user.is_admin = True\r\n user.save(using=self._db)\r\n return user", "def create_superuser(self, email, password, full_name=None):\n user = self.create_user(\n email,\n password=password,\n )\n user.staff = True\n user.admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, username, password):\n user = self.create_user(\n email,\n username,\n password,\n )\n user.is_staff = True\n user.is_superuser = True\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n extra_fields.setdefault('type', \"ADMINISTRATOR\")\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, name, password):\n user = self.create_user(email,\n password=password,\n name=name,\n )\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n user = self.create_user(email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, name, password):\n user = self.create_user(email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n user = self.model(username=username, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_superuser(self, username, email, password):\n user = self.create_user(username, email,\n password=password\n )\n user.is_admin = True\n user.is_active = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password=None):\n\n user = self.create_user(email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n\n return user", "def create_superuser(self, su_id, first_name, last_name, email, phone_number, password):\n user = self.create_user(\n su_id,\n first_name,\n last_name,\n email,\n phone_number,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email: str = None, password: str = None, **kwargs):\n kwargs.setdefault('is_staff', True)\n kwargs.setdefault('is_superuser', True)\n return self._create_user(username, email=email, password=password, **kwargs)", "def create_superuser(self, name, email, password):\n user = self.create_user(name, email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password=None):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.is_admin = True\n 
user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password=None):\n user = self.create_user(\n email,\n password=password,\n name=name,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n\n )\n\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n user = self.create_user(email, name, password)\n # is_superuser is created automatically by PermissionsMixin\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, date_of_birth, password):\n user = self.create_user(email,\n password=password,\n date_of_birth=date_of_birth\n )\n user.is_admin = True\n user.save()\n return user", "def create_superuser(self, username, email, password):\n return self.create_user(username, email, password, is_staff = True, is_superuser= True)", "def create_superuser(self, email, first_name, password, last_name=None):\n user = self.create_user(email, first_name, last_name=last_name, password=password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault(\"is_staff\", False)\n extra_fields.setdefault(\"is_superuser\", False)\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, username, password):\n user = self.create_user(email=email, username=username, password=password)\n\n user.is_superuser = True\n user.is_staff = True\n user.roles = \"UA\"\n\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)" ]
[ "0.83345544", "0.82813346", "0.8258201", "0.8249415", "0.8248188", "0.8223905", "0.82129776", "0.8210377", "0.8180428", "0.8175924", "0.8174477", "0.8159496", "0.81555283", "0.8152179", "0.8147241", "0.8146682", "0.81466043", "0.81466043", "0.81466043", "0.81454295", "0.813191", "0.8122425", "0.8113123", "0.81122416", "0.8111555", "0.8111555", "0.8111555", "0.8111555", "0.8111555", "0.8096645", "0.80870897", "0.80782974", "0.8073682", "0.8064242", "0.8060121", "0.80563986", "0.80556476", "0.8053624", "0.8052449", "0.8042498", "0.8041838", "0.8037606", "0.8036356", "0.80359536", "0.8032157", "0.8027266", "0.8026746", "0.8025416", "0.802289", "0.80132306", "0.8009699", "0.80085695", "0.80017203", "0.80017203", "0.80014163", "0.7996552", "0.7992682", "0.79904085", "0.7989501", "0.79844254", "0.7983662", "0.7981337", "0.7979298", "0.79528415", "0.79487747", "0.79454297", "0.7943778", "0.79416806", "0.7930791", "0.7930067", "0.79201204", "0.79152834", "0.79150033", "0.7914253", "0.7907019", "0.7905839", "0.7900301", "0.78925514", "0.7885453", "0.7885453", "0.7878807", "0.7874154", "0.78659457", "0.7846161", "0.78412604", "0.7837525", "0.7830759", "0.78305024", "0.78154755", "0.7809416", "0.77843595", "0.77798975", "0.77794546", "0.7765717", "0.7761942", "0.7755883", "0.7755883", "0.7755883", "0.7755883", "0.7755883" ]
0.80541736
37
Get a name for the user. Use this whenever you show the user in the UI.
def displayname(self):
    return self.email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "def name(self) -> str:\n return self.user.name", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def user_name(self):\n return self._user_name", "def getUserName(self):\n userType = self.env['res.users']\n \n uiUser = userType.browse(self._uid)\n return uiUser.name", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def user_name(self):\n\n return self._user_name", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()", "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def getName(self):\n return self.__username", "def get_user_name(user: User) -> str:\n user_name = user.get(\"display_name\")\n if not user_name:\n user_name = user[\"fullname\"]\n if not user_name:\n user_name = user[\"name\"]\n return user_name", "def user_name(self):\n return lamin_user_settings().name", "def get_full_name(self):\n return self.username", "def get_full_name(self):\n return self.username", "def get_displayname(self):\n return self.full_name or self.user.username", "def _get_user_name(self):\n if self.runtime.get_real_user is None:\n return 'staff'\n else:\n return self.runtime.get_real_user(self.runtime.anonymous_student_id).username", "def user_name(self):\n return self._stub.List(self._message).user_name", "def display_name(self) -> str:\n return self.requester.username", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def __str__(self):\n return self.user_name", "def __str__(self):\n return self.user.get_full_name()", "async def get_user_name(self, user_target: str) -> str:\n user = await self.get_user(user_target=user_target)\n if user is None:\n return user_target\n return user.display_name", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_user_fullname(self):\n return self.applicant.userprofile.display_name()", "def get_user_display_name():\n user_display_name = session.get(\"user_display_name\")\n return user_display_name if user_display_name else None", "def get_user_name(self):\n\t\treturn call_sdk_function('PrlLic_GetUserName', self.handle)", "def get_short_name(self):\n # The user is identified by the email address\n return self.email", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def full_name(self):\n return self.user.get_full_name() or None", "def __str__(self) -> str:\n\n return self.user.get_full_name()", "def get_username(self):\r\n return 
self.username", "def user_display_name(self):\n return self.key.id()", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]", "def name(self):\n if self.user_provided_name is not None:\n return self.user_provided_name\n else:\n return super().name", "def get_username(self):\n return self.browser.find_element(*locators.USER_NAME_TEXT).text", "def __str__(self):\n return \"{}\".format(self.user.username)", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def get_current_user_full_name(self):\n user_service = self.runtime.service(self, 'user')\n xb_user = user_service.get_current_user()\n\n return xb_user.full_name", "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "def user(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user\")", "def get_username(self):\n return str(getattr(self, self.USERNAME_FIELD))", "def get_username(self):\n return self.username", "def username(self):\n return self.user.username", "def username(user_id):\n return UserIndex.instance().name(user_id)", "def get_facebook_name(self, user_name):\n url = 'https://mbasic.facebook.com/{}'.format(user_name)\n self.get(url)\n name = self.title\n log.debug(\"NAME: \" + name)\n return name", "def _get_username_from_api(self):\n result = self.api_query(action=\"query\", meta=\"userinfo\")\n return result[\"query\"][\"userinfo\"][\"name\"]", "def get_user_fullname(self):\n member = self.get_user()\n if member:\n return member.getProperty('fullname')", "def username(self, instance):\r\n return instance.user.username", "def username(self) -> str:", "def username(self) -> str:", "def __str__(self):\n return self.user.first_name", "def get_username(self, obj):\n return obj.user.username", "def get_username(self, obj):\n return obj.user.username", "def __str__(self) -> str:\n return self.name or self.username", "def get_user_name(self, uid):\n uid = str(uid)\n name = self._username_cache.get(uid)\n if name is None:\n name = self.fbchat_client.fetchUserInfo(uid)[uid].name\n self._username_cache[uid] = name\n return name", "def __str__(self):\n return self.user.username", "def __str__(self):\n return self.user.username", "def __str__(self):\n return self.user.username", "def __str__(self):\n return self.user.username", "def __str__(self):\n return self.user.username", "def get_username(self) -> str:\n return self._username", "def get_user_name_by_id(self, user_id):\n try:\n res = self.db_handler.get_user_name_by_id(user_id)\n\n self.logger.write_to_log('user full name got', user_id)\n\n return res\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def _get_name(self):\n return self.name", "def GetUsername(self):\n pass", "def user_name(self) -> str:\n result = subprocess.run(\n [\"git\", \"config\", \"user.name\"],\n check=True,\n cwd=self.dir,\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n )\n return result.stdout.strip()", "def username(self) -> str:\n raise NotImplementedError", "def user_name(self):\n return utils.to_unicode(lib.sp_session_user_name(self._sp_session))", "def get_username(self, request):\r\n try:\r\n return request.user.username\r\n except AttributeError:\r\n return ''", "def get_name(self) -> 
str:\r\n return self.name", "def last_name(self, instance):\r\n return instance.user.last_name", "def show_user_info(self):\n name = self.get_user_name()\n print(f'Name: {name.title()}')\n print(f'Age: {self.age}')\n print(f'Gender: {self.gender.title()}')\n print(f'Mobile: {self.m_number}')", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )", "def get_name(self):\r\n return self.name", "def full_name(self, obj: User) -> str:\n return obj.get_full_name()", "def get_name(user_id):\n try:\n student = _UserProfile.objects.get(user_id=user_id)\n except _UserProfile.DoesNotExist:\n log.exception(f'Could not find UserProfile for id {user_id}')\n return None\n return student.name or None", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name" ]
[ "0.85959435", "0.846185", "0.83146304", "0.8313038", "0.8228794", "0.8220969", "0.8218388", "0.81126636", "0.8054196", "0.8036822", "0.8036822", "0.8036822", "0.80256647", "0.79910946", "0.79809284", "0.7884007", "0.78487474", "0.7847428", "0.78015506", "0.78015506", "0.7790366", "0.77718383", "0.7764101", "0.7711553", "0.7711345", "0.7631133", "0.7587803", "0.7568301", "0.7546056", "0.7546056", "0.7546056", "0.7543907", "0.75401896", "0.7509776", "0.74987173", "0.7489906", "0.7471283", "0.74279094", "0.7426687", "0.7382192", "0.735303", "0.731993", "0.7314211", "0.73070323", "0.73042977", "0.73029864", "0.72999287", "0.72999287", "0.72999287", "0.72945064", "0.7294246", "0.7293697", "0.72814775", "0.7267477", "0.72537464", "0.7250718", "0.7244959", "0.7234362", "0.7227453", "0.72161007", "0.720906", "0.720906", "0.7207028", "0.72042745", "0.72042745", "0.719405", "0.71911454", "0.7170593", "0.7170593", "0.7170593", "0.7170593", "0.7170593", "0.7153821", "0.71482176", "0.7145064", "0.7141173", "0.7129781", "0.7129589", "0.7125274", "0.71224487", "0.7105953", "0.709573", "0.7088869", "0.7086534", "0.7063233", "0.7059905", "0.70514137", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589", "0.7037589" ]
0.0
-1
Is the user a member of staff?
def is_staff(self):
    return self.is_admin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def is_staff(ctx):\n member = ctx.message.author\n vipRole = discord.utils.get(member.guild.roles, name=ROLE_VIP)\n staffRole = discord.utils.get(member.guild.roles, name=ROLE_STAFF)\n return vipRole in member.roles or staffRole in member.roles", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n\t\treturn self.is_admin", "def is_staff(self) -> bool:\n return self.is_admin", "def is_staff(self):\r\n return self.is_admin", "def is_staff(request):\n\n if request:\n if hasattr(request, 'user') and request.user.is_authenticated():\n return request.user.is_staff\n return False", "def is_member(user: User) -> bool:\n if not user:\n raise TypeError('user should not be None')\n return user.name.startswith('L')", "def is_faculty():\n return _is_member('uw_faculty')", "def is_employee():\n return _is_member('uw_employee')", "def is_staff_user(self):\n\n return self.is_staff", "def is_staff(connection, window_info, kwargs):\n return window_info and window_info.is_staff", "def is_verified_by_staff(self):\n form_entries = self.form_entries.all().distinct('user')\n for f in form_entries:\n if f.user.is_staff or f.user.is_superuser:\n return self.verified\n\n return False", "def _is_staff_for_article(article, user):\r\n return user.is_staff or user.is_superuser or user_is_article_course_staff(user, article)", "def is_course_staff(self):\n # pylint: disable=no-member\n return getattr(self.xmodule_runtime, 'user_is_staff', False)", "def is_course_staff(self):\n # pylint: disable=no-member\n return getattr(self.xmodule_runtime, 'user_is_staff', False)", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def is_user_allowed(self, user):\n return user.is_staff", "def test_user_role_staff(self):\r\n self.assertEqual(\r\n 'staff',\r\n access.get_user_role(self.course_staff, self.course_key)\r\n )\r\n # Masquerade staff\r\n self.course_staff.masquerade_as_student = True\r\n self.assertEqual(\r\n 'student',\r\n access.get_user_role(self.course_staff, self.course_key)\r\n )", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_revised_by_staff(self):\n\n form_entries = self.form_entries.all().distinct('user')\n for f in form_entries:\n if f.user.is_staff or f.user.is_superuser:\n return True\n\n return False", "def is_member(self, id, user):\n request = self.request_builder('orgs.teams.is_member',\n id=id, user=user)\n return self._bool(request)", "def test_is_staff_access(self):\r\n self.check_index_and_outline(self.client)", "def user_is_admin(user):\n return user in admins", "def is_student_employee():\n return _is_member('uw_affiliation_student-employee')", "def testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)", "def test_is_student_user(self):\n student = User.objects.get(email='teststudentuser@test.com')\n self.assertEqual(student.is_staff, False)", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff 
or lhRole in member.roles: return True", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def user_has_access(self, user):\n if not user: return False\n query = db.Query(TaskListMember)\n query.filter('task_list =', self)\n query.filter('user =', user)\n return query.get()", "def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})", "def is_member(self, username):\n usernames = [user.username for user in self.members]\n return True if username in usernames else False", "def is_prime_member(user: User) -> bool:\n if not user:\n raise TypeError('user should not be None')\n return user.name.startswith('W')", "def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)", "def is_faculty(user):\n return Affil.objects.filter(user=user).exists() or \\\n faculty_courses_for_user(user).exists()", "def show_staff_ui(self):\n return self.is_course_staff and not self.in_studio_preview", "def verify_user(self):\n verified = False\n if self.user.role.role_name == \"Admin\":\n verified = True\n\n return verified", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user.is_staff or request.user.is_superuser:\n return True\n try:\n return request.user in obj.course.instructors.all()\n except AttributeError:\n # activitylevel => has no course element\n return request.user.is_instructor", "def has_student(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.students.filter(id=user.id)) > 0", "def CAN_MODERATE(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def has_instructor(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.instructors.filter(id=user.id)) > 0", "def has_object_permission(self, request, view, obj):\n return request.user.is_manager or request.user.is_staff", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "async def is_admin(ctx):\n member = ctx.message.author\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n if aRole in member.roles or member.id == 715048392408956950: return True", "def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff", "def is_usermanager(self):\n return self.can(Permission.CRUD_USERS)", "def test_get_user_role_staff(self):\r\n add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)\r\n self.assertEqual(\r\n 'staff',\r\n get_user_role(self.staff, self.course_key)\r\n )", "def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)", "def has_authority(self, user):\n UserModel = get_user_model()\n ADMINISTRATOR = UserModel.ROLE_MAP[UserModel.ADMINISTRATOR]\n result = True\n\n if not (user.is_superuser or 
user.role == ADMINISTRATOR):\n try:\n self.memberships.get(user=user)\n except Membership.DoesNotExist:\n result = False\n\n return result", "def has_user(self, user, allow_superusers=True):\n return self.has_student(user, allow_superusers) or self.has_ta(user, False) or self.has_instructor(user, False)", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def CAN_ASSIGN(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_student(self):\n return bool(LTI_ROLES[STUDENT] & self.roles)", "def is_admin(self, user):\n return user.name in self.admins", "def is_team_member(session, api, team, user):\n teams = session.get(\"teams\", {})\n # Check to see if their permissions are still valid\n if teams and team in teams and TimeUtils.get_local_timestamp() < teams[team][1]:\n return teams[team][0]\n\n is_member = api.is_member(team, user)\n logger.info(\"User '%s' member status of '%s': %s\" % (user, team, is_member))\n teams[team] = (is_member, TimeUtils.get_local_timestamp() + settings.PERMISSION_CACHE_TIMEOUT)\n session[\"teams\"] = teams\n return is_member", "def has_access(user, role):\r\n if not user.is_active:\r\n return False\r\n # do cheapest check first even tho it's not the direct one\r\n if GlobalStaff().has_user(user):\r\n return True\r\n # CourseCreator is odd b/c it can be disabled via config\r\n if isinstance(role, CourseCreatorRole):\r\n # completely shut down course creation setting\r\n if settings.FEATURES.get('DISABLE_COURSE_CREATION', False):\r\n return False\r\n # wide open course creation setting\r\n if not settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):\r\n return True\r\n\r\n if role.has_user(user):\r\n return True\r\n # if not, then check inferred permissions\r\n if (isinstance(role, (CourseStaffRole, CourseBetaTesterRole)) and\r\n CourseInstructorRole(role.course_key).has_user(user)):\r\n return True\r\n return False", "def test_staff_permission_required(self, username, is_staff, expected_status):\n UserFactory(username=username, password='edx', is_staff=is_staff)\n self.client.login(username=username, password='edx')\n response = self.client.get(self.path())\n assert response.status_code == expected_status\n\n response = self.client.post(self.path(), {'user_id': username, 'action': 'allow'}, format='json')\n assert response.status_code == expected_status", "def has_object_permission(self, request, view, account):\n if request.user.is_authenticated():\n if request.user.is_staff:\n return True\n return account.username == request.user.username\n return False", "def can_edit(self, user):\n return self.author_id == user.id or user.is_staff", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False", "def isAdmin(user):\n return isUserType(user, Admin)", "def is_mutable_by(self, user, perm='site.change_localsite'):\n return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def test_func(self):\n 
member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self", "def is_user(id):\n return id.startswith('U')", "def is_member(request):\n if request.method == \"GET\":\n user_id = request.GET.get('user_id', None)\n board_id = request.GET.get('board_id', None)\n if Member.objects.get(board_id=board_id, user_id=user_id).exists():\n return Response({\"is_member\": True})\n else:\n return Response({\"is_member\": False})", "def check_access(self):\n if not has_course_staff_privileges(self.request.user, self.course.id):\n raise PermissionDenied(\n \"To manage team membership of {}, you must be course staff.\".format(\n self.course.id\n )\n )", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def is_instructor(self):\n return bool(LTI_ROLES[INSTRUCTOR] & self.roles)", "def is_bothell_student():\n return _is_member('uw_affiliation_bothell-student')", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def is_regular_user(user):\n return user.is_authenticated()", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "def is_accessible_by(self, user):\n return (self.public or\n (user.is_authenticated and\n (user.is_staff or self.users.filter(pk=user.pk).exists())))", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def CAN_ASSIGN_OWNER(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def user_is_subjectadmin(userobj):\n from .subject import Subject\n return user_is_basenodeadmin(userobj, Subject)", "def _has_staff_access_to_descriptor(user, descriptor, course_key):\r\n return _has_staff_access_to_location(user, descriptor.location, course_key)", "def set_is_staff(self, role):\n self.is_staff = (role != User.ROLE_USER)", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def is_user_admin(self, user):\n return user == self.created_by", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def user_is_student(userobj):\n from .assignment_group import AssignmentGroup\n return AssignmentGroup.published_where_is_candidate(userobj).exists()", "def is_participant(self, user) -> bool:\n return (\n user.is_superuser\n or 
user.groups.filter(pk=self.participants_group.pk).exists()\n )", "def get_viewable(self, user):\n if user.get('role') in ('admin', 'manager', 'engineer'):\n return True\n return user['name'] == self.doc.get('customer')", "def user_is_periodadmin(userobj):\n from .period import Period\n return user_is_basenodeadmin(userobj, Period)", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def is_member(self, account):\n return self.find_entry_for(account, key_only=True) is not None", "def is_logged_in():\n return 'user' in session", "def is_usermanager(self):\n return False" ]
[ "0.8091983", "0.79417914", "0.79417914", "0.79417914", "0.78373754", "0.7725267", "0.7695455", "0.7543704", "0.7538521", "0.74671674", "0.74179083", "0.740949", "0.740872", "0.7405209", "0.7117188", "0.7050242", "0.7050242", "0.7018241", "0.694894", "0.68767804", "0.685789", "0.6831453", "0.68031365", "0.67642087", "0.67447615", "0.6734352", "0.6723223", "0.6650916", "0.6624902", "0.6579338", "0.65577245", "0.6550394", "0.6546509", "0.65304404", "0.6528451", "0.65214944", "0.64780164", "0.64722186", "0.6454196", "0.64506584", "0.64497733", "0.6420618", "0.6419102", "0.6412704", "0.64056563", "0.64018387", "0.6389255", "0.6386641", "0.6386638", "0.63858026", "0.6383387", "0.63806707", "0.6361385", "0.6359756", "0.6341367", "0.63325614", "0.6321165", "0.6316764", "0.6309415", "0.6308557", "0.63084245", "0.6302706", "0.62962985", "0.62960124", "0.62948346", "0.6292858", "0.62879694", "0.6273254", "0.6269982", "0.6266089", "0.6265471", "0.62342566", "0.623222", "0.6224691", "0.6222983", "0.6216324", "0.62158865", "0.62130815", "0.6197196", "0.6192133", "0.6192133", "0.6179083", "0.6173221", "0.61703104", "0.6170011", "0.6168375", "0.6167696", "0.616258", "0.6161334", "0.6160815", "0.6151699", "0.6137683", "0.61307186", "0.6124627", "0.61148745", "0.6111155", "0.61088544", "0.610009", "0.60989916" ]
0.76052535
8
Is the user a superuser?
def is_superuser(self): return self.is_admin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)", "def is_superuser():\n if sys.version > \"2.7\":\n for uid in os.getresuid():\n if uid == 0:\n return True\n else:\n if os.getuid() == 0 or os.getegid() == 0:\n return True\n return False", "def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'", "def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)", "def is_superuser(connection, window_info, kwargs):\n return window_info and window_info.is_superuser", "def is_user_admin(request):\n return request.user.is_superuser", "def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})", "def isSuperUser(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])\n if decoded['is_superuser'] == True:\n return True\n except:\n return False", "def test_func(self):\n return self.request.user.is_superuser", "def super_user(self) -> Optional[str]:\n return pulumi.get(self, \"super_user\")", "def is_not_admin(user):\n return not user.is_superuser", "def superuser(request):\n response_status_code = status.HTTP_403_FORBIDDEN\n\n username = request.POST.get('username')\n user = None\n\n user_class = get_user_model()\n try:\n service_identifiers = Service.objects.all().values_list('identifier', flat=True)\n user = user_class.objects.exclude(username__in=service_identifiers).get(username=username, is_active=True)\n except user_class.DoesNotExist:\n pass\n\n if user and user.is_superuser:\n response_status_code = status.HTTP_200_OK\n\n logger.info('MQTT is super user check for user \"{}\": {}'.format(\n username, 'True' if response_status_code == status.HTTP_200_OK else 'False'))\n\n return HttpResponse(status=response_status_code)", "def test_if_allowed_for_superusers_permissions(self):\r\n res = self.client_superuser.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def has_student(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.students.filter(id=user.id)) > 0", "def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def user_is_root():\n return os.geteuid() == 0", "def has_object_permission(self, request, view, user):\n return user == request.user or request.user.is_superuser", "def is_regular_user(user):\n return user.is_authenticated()", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def is_user_admin(self, user):\n return user == self.created_by", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def test_superuser_permission_with_super_user(self):\n with self.settings(MAINTENANCE_MODE_PERMISSION_PROCESSORS=(\n 
'maintenancemode.permission_processors.is_superuser',\n )):\n self.client.login(username='super_user', password='maintenance_pw')\n response = self.client.get('/')\n self.assertNormalMode(response)", "def test_user_is_superuser(self):\n super_user = self.create_superuser()\n regular_user = self.create_user()\n thread = self.create_thread(status='deleted')\n message = thread.first_message\n self.assertTrue(message.visible_to_user(super_user))\n self.assertFalse(message.visible_to_user(regular_user))", "def is_user_root():\n return (True if os.getuid() == 0 else False)", "def has_ta(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.tas.filter(id=user.id)) > 0", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def has_add_permission(self, request):\n return request.user.is_superuser or super().has_add_permission(request)", "def superuser_required(view_func):\n return user_passes_test(lambda u: u.is_superuser, login_url='/', redirect_field_name=None)(view_func)", "def test_registeration_for_a_super_user(self):\n admin_user = User.objects.create_superuser(\n 'jey',\n 'jey@gmail.com',\n 'jemo'\n )\n self.assertEqual(admin_user.is_active, True)\n self.assertEqual(admin_user.is_staff, True)\n self.assertEqual(admin_user.is_superuser, True)", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def is_staff(self):\r\n return self.is_admin", "def has_user(self, user, allow_superusers=True):\n return self.has_student(user, allow_superusers) or self.has_ta(user, False) or self.has_instructor(user, False)", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "def is_administrator(self):\n return False", "def user_is_admin(user):\n return user in admins", "def has_perm(self, user):\n return True", "def is_staff(self):\n\t\treturn self.is_admin", "def is_user_context(context):\n if not context or not isinstance(context, RequestContext):\n return False\n if context.is_admin:\n return False\n return True", "def is_staff(self):\n return self.is_admin", "def is_staff(self):\n return self.is_admin", "def has_root():\n return bool(shell32.IsUserAnAdmin())", "def is_user_allowed(self, user):\n return user.is_staff", "def is_super_manager(self, loginID):\n self.cursor.execute(\"\"\"SELECT managerID FROM managercredentials WHERE loginID=%s\"\"\", (loginID,))\n user_key = self.cursor.fetchone()[0]\n self.cursor.execute(\"\"\"SELECT MIN(managerID) FROM managercredentials\"\"\")\n if user_key == self.cursor.fetchone()[0]:\n return True\n return False", "def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )", "def has_object_permission(self, request, view, obj):\n if request.user and (request.user.is_staff or request.user.is_superuser):\n return True\n return super().has_object_permission(request, view, obj)", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def isAdmin(user):\n return isUserType(user, Admin)", "def is_staff_user(self):\n\n return self.is_staff", "def has_object_create_permission(self, request):\n user = request.user\n if user.is_superuser:\n return user.is_superuser\n\n return self.user == user", "def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = 
self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False", "def testSuperUserPermission(self):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 200)", "def test_create_super_user(self):\n user = get_user_model().objects.create_superuser(\"test@asd.com\", \"test123\")\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def is_admin(self, user):\n return user.name in self.admins", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def test_superuser():\n assert os.geteuid() == 0, \"Need ROOT access in order to run tests.\"", "def test_create_super_user(self):\n user = get_user_model().objects.create_superuser(\n email='admin@1234',\n password='adminpassword'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False", "def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False", "def isroot():\n\treturn (os.geteuid() == 0)", "def is_staff(self) -> bool:\n return self.is_admin", "def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0", "def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)", "def is_staff(request):\n\n if request:\n if hasattr(request, 'user') and request.user.is_authenticated():\n return request.user.is_staff\n return False", "def test_if_created_superusers_permissions(self):\r\n payload = {\r\n \"email\": \"t@t.pl\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_superuser.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def detect_users(conn, test_user):\n # Check the user we're connected as is a cluster superuser\n username = conn.scalar(text(\"VALUES (CURRENT_USER)\"))\n super_user = conn.scalar(text(\n \"SELECT usesuper FROM pg_user WHERE usename = CURRENT_USER\"))\n if not super_user:\n raise RuntimeError(\"User %s is not a cluster superuser\" % username)\n # Check the \"normal\" user exists and warn if it's a cluster superuser\n super_user = conn.scalar(text(\n \"SELECT usesuper FROM pg_user WHERE usename = :user\"), user=test_user)\n if super_user is None:\n raise RuntimeError(\"User %s doesn't exist as a cluster user\" % test_user)\n if super_user:\n raise RuntimeError(\"User %s is a cluster superuser; this is not \"\n \"recommended\" % test_user)", "def check_enableusersite():\r\n if sys.flags.no_user_site:\r\n return False\r\n\r\n if hasattr(os, \"getuid\") and hasattr(os, \"geteuid\"):\r\n # check process uid == effective uid\r\n if os.geteuid() != 
os.getuid():\r\n return None\r\n if hasattr(os, \"getgid\") and hasattr(os, \"getegid\"):\r\n # check process gid == effective gid\r\n if os.getegid() != os.getgid():\r\n return None\r\n\r\n return True", "def ensure_superuser_exists(*args, **kwargs):\n from django.contrib.auth.models import User\n username = settings.ADMIN_USERNAME\n password = settings.ADMIN_PASSWORD\n logger = logging.getLogger('otree')\n if User.objects.filter(username=username).exists():\n # msg = 'Default superuser exists.'\n # logger.info(msg)\n return True\n if not password:\n return False\n assert User.objects.create_superuser(username, email='',\n password=password)\n msg = 'Created superuser \"{}\"'.format(username)\n logger.info(msg)\n return True", "def super_user_required(func):\n\n @functools.wraps(func)\n def __wrapper(request, *args, **kwds):\n \"\"\"Makes it possible for super_user_required to be used as a decorator.\"\"\"\n if request.profile.is_superuser:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be a superuser to view this page.')\n\n return __wrapper", "def has_object_permission(self, request, view, obj):\n is_grupo_usuario_admin = request.user.grupo.name == \"Administrador\"\n is_grupo_obj_superuser = (obj.grupo.name == \"SuperUsuario\" or\n obj.grupo.name == \"Administrador\")\n if is_grupo_usuario_admin and is_grupo_obj_superuser:\n return False\n\n return True", "def is_admin(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {\"username\":username})\n res = cur.fetchone()\n if res[5].lower() == 'admin':\n return True\n return False", "def is_admin():\n if os.name == 'nt':\n try:\n # Only Windows users with admin privileges can read \n # the C:\\windows\\temp directory.\n os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\\\windows'),'temp']))\n except:\n return False\n else:\n return True\n else:\n # Root has UID 0 on Unix systems.\n if 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False", "def _is_staff_for_article(article, user):\r\n return user.is_staff or user.is_superuser or user_is_article_course_staff(user, article)", "def IsPrivilegedUser(user_email, is_admin):\n return is_admin or (user_email and user_email.endswith('@google.com'))", "def has_instructor(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.instructors.filter(id=user.id)) > 0", "def test_user_is_superuser(self):\n self.user.is_superuser = True\n self.user.save()\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_admin(username: str) -> bool:\n db = get_db()\n return int(db.get_user_by_name(username)[\"is_admin\"]) == 1", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1", "def is_usermanager(self):\n return False", "def 
admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True", "def _check_remove_last_super(user_obj):\n if not user_obj.is_superuser:\n return\n\n # Is there any other active superuser left?\n all_active_su = User.objects.filter(is_superuser__exact = True,\n is_active__exact = True)\n num_active_su = all_active_su.count()\n assert num_active_su >= 1, _(\"No active superuser configured.\")\n if num_active_su == 1:\n raise PopupException(_(\"You cannot remove the last active superuser from the configuration.\"), error_code=401)", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_usermanager(self):\n return self.can(Permission.CRUD_USERS)", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def can_edit_user(user):\n\tu = current_user._get_current_object()\n\treturn u==user or u.is_admin()", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def _is_privileged_user(email):\n if local_config.AuthConfig().get('all_users_privileged'):\n return True\n\n privileged_user_emails = (db_config.get_value('privileged_users') or\n '').splitlines()\n return any(\n utils.emails_equal(email, privileged_user_email)\n for privileged_user_email in privileged_user_emails)" ]
[ "0.8203781", "0.8123163", "0.7936285", "0.77750057", "0.76693565", "0.7637397", "0.757354", "0.7337289", "0.7219452", "0.713708", "0.70978487", "0.7031901", "0.69242924", "0.69207996", "0.6917187", "0.6854157", "0.68328345", "0.68302256", "0.6777356", "0.6768828", "0.6747381", "0.67279756", "0.67147404", "0.67134607", "0.6709543", "0.66998076", "0.6679774", "0.6645155", "0.662206", "0.6602015", "0.65956557", "0.6581775", "0.6581422", "0.6581202", "0.65735424", "0.65695417", "0.65377164", "0.65308374", "0.65279293", "0.65169865", "0.650583", "0.64830047", "0.64830047", "0.6458175", "0.64491314", "0.6441866", "0.6437345", "0.64316136", "0.64304066", "0.64271617", "0.6424881", "0.64186937", "0.6413111", "0.6412418", "0.6411531", "0.6394908", "0.63892037", "0.63815683", "0.6364445", "0.636228", "0.6351938", "0.6347132", "0.6333182", "0.6333182", "0.6330271", "0.6317534", "0.6312057", "0.62991625", "0.62949574", "0.6293366", "0.62812454", "0.6254621", "0.62365717", "0.6226314", "0.6210904", "0.62042123", "0.61972064", "0.6194501", "0.6184061", "0.6179563", "0.6170453", "0.61589324", "0.61589324", "0.615863", "0.6155888", "0.6151552", "0.6151203", "0.6149977", "0.61464596", "0.61397606", "0.61397606", "0.61397606", "0.6129804", "0.6124261", "0.6119653", "0.6117185", "0.6116476", "0.6095404", "0.60902494", "0.6086899" ]
0.80404836
2
Normalize and then split tags on commas and spaces. Empty tags are removed.
def split_commaseparated_tags(cls, commaseparatedtags): if commaseparatedtags.strip() == '': return [] else: return [ cls.normalize_tag(tagstring) for tagstring in list([_f for _f in re.split(r'[,\s]', commaseparatedtags) if _f])]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitTags(user_input):\n \n elements = []\n if ',' in user_input:\n elements = user_input.split(',')\n elif ' ' in user_input:\n elements = user_input.split(' ')\n else:\n elements.append(user_input)\n\n tags = []\n for element in elements:\n element = element.strip(' \\t\\n\\r').lower()\n if(len(element) == 0): continue\n if element not in tags:\n tags.append(element)\n return tags", "def sanitise_tags(tags):\n\n # hack out all kinds of whitespace, then split on ,\n # if you run into more illegal characters (simplenote does not want to sync them)\n # add them to the regular expression above.\n illegals_removed = tags_illegal_chars.sub('', tags)\n if len(illegals_removed) == 0:\n # special case for empty string ''\n # split turns that into [''], which is not valid\n return []\n\n else:\n return illegals_removed.split(',')", "def transform_tags(self, instance):\n return instance.tags.split(',')", "def dataNormalize(data):\n tr = re.sub('<(.*)>|\\\\n', '', data.prettify())\n return re.sub(' +', ',', tr).split(',')", "def validateTags(self, tags):\n\t\treturn tags.replace(', ',' ')", "def normalize_tags(tags):\n return {normalize(tag) for tag in tags}", "def normalized_pos_tags(self):\n pos_list = []\n for pos in self.pos_tags:\n pos_list.extend([i for i in re.split('[:;]', pos) if i != ''])\n return pos_list", "def strip_tags(tagged_sentences):\n untagged_sentences = []\n for taggedsent in tagged_sentences:\n untaggedsent = ''\n\tfor taggedword in taggedsent.split():\n\t word = re.split('(?<!\\\\\\)\\/', taggedword)[0]\n untaggedsent += word + ' '\n #print untaggedsent\n untagged_sentences.append(untaggedsent)\n return untagged_sentences", "def _get_norm_tags(self, tags):\n norm_tags = []\n for tag in tags:\n lang = tag[0:2]\n norm_tags.append(lang + ':' + self.tag_manager.normalize_tag_wtokenization(tag, self.tries[lang]))\n return norm_tags", "def clean_tag(elmt_with_commas, max_lenght):\r\n elmt_list = elmt_with_commas.split(\",\")\r\n elmt_list = [e.strip() for e in elmt_list if len(e) < max_lenght]\r\n return elmt_list", "def process_tags(tags=list):\n new_tag_list = list()\n for tag in tags:\n new_tag = tag.replace(\"<\", \" \")\n new_tag = new_tag.replace(\">\", \" \")\n new_tag = new_tag.split()\n # sort elements by string length (this to avoid 'c' being checked before 'c++', etc)\n new_tag.sort(key=len, reverse=True)\n new_tag_list.append(new_tag)\n return new_tag_list", "def parse_tags(tags: str = None) -> Iterable[str]:\n if not tags:\n return []\n return [x.strip() for x in tags.split(\",\")]", "def change_tags_format(page_tags):\n\treturn [tags.replace('\\n', ', ') if not tags == None else None for tags in page_tags]", "def parse_tag_input(input):\r\n if not input:\r\n return []\r\n\r\n input = force_unicode(input)\r\n\r\n # Special case - if there are no commas or double quotes in the\r\n # input, we don't *do* a recall... 
I mean, we know we only need to\r\n # split on spaces.\r\n if u',' not in input and u'\"' not in input:\r\n words = list(set(split_strip(input, u' ')))\r\n words.sort()\r\n return words\r\n\r\n words = []\r\n buffer = []\r\n # Defer splitting of non-quoted sections until we know if there are\r\n # any unquoted commas.\r\n to_be_split = []\r\n saw_loose_comma = False\r\n open_quote = False\r\n i = iter(input)\r\n try:\r\n while 1:\r\n c = i.next()\r\n if c == u'\"':\r\n if buffer:\r\n to_be_split.append(u''.join(buffer))\r\n buffer = []\r\n # Find the matching quote\r\n open_quote = True\r\n c = i.next()\r\n while c != u'\"':\r\n buffer.append(c)\r\n c = i.next()\r\n if buffer:\r\n word = u''.join(buffer).strip()\r\n if word:\r\n words.append(word)\r\n buffer = []\r\n open_quote = False\r\n else:\r\n if not saw_loose_comma and c == u',':\r\n saw_loose_comma = True\r\n buffer.append(c)\r\n except StopIteration:\r\n # If we were parsing an open quote which was never closed treat\r\n # the buffer as unquoted.\r\n if buffer:\r\n if open_quote and u',' in buffer:\r\n saw_loose_comma = True\r\n to_be_split.append(u''.join(buffer))\r\n if to_be_split:\r\n if saw_loose_comma:\r\n delimiter = u','\r\n else:\r\n delimiter = u' '\r\n for chunk in to_be_split:\r\n words.extend(split_strip(chunk, delimiter))\r\n words = list(set(words))\r\n words.sort()\r\n return words", "def split_corpus_tags(self, corpus):\n logging.info('Dividindo texto das tags')\n sentences = []\n tags = []\n dict_tags = {}\n for sentence in corpus:\n sentence_tmp = sentence.replace(\"\\n\", '')\n words_tmp = []\n tags_tmp = []\n words = sentence_tmp.split(\" \")\n for word in words:\n tag_word = word.split(\"_\")\n if tag_word[0] == \"\": pass\n else:\n words_tmp.append(tag_word[0])\n tags_tmp.append(tag_word[1])\n if not tag_word[1] in dict_tags.keys(): \n dict_tags[tag_word[1]] = {}\n dict_tags[tag_word[1]]['right'] = 0\n dict_tags[tag_word[1]]['pred'] = 0\n dict_tags[tag_word[1]]['pres'] = 1\n else: dict_tags[tag_word[1]]['pres'] += 1\n sentences.append(words_tmp)\n tags.append(tags_tmp)\n return sentences, tags, dict_tags", "def split(self, text):\n\n return [x.strip() for x in text.split(\",\")]", "def preprocess(self, sentence, vocab_set=None):\n tokens = sentence.split()\n new_tokens = []\n for token in tokens:\n new_tokens += self.__clean(token)\n tokens = new_tokens\n\n tokens = self.__normalize_document(' '.join(tokens))\n\n return tokens", "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def tags(self):\n return tuple([x.strip() for x in self._dict.get('tags').split(',')])", "def tags_list(self):\n return [tag.strip() for tag in self.tags.split(',')]", "def parse_normalized(line):\n return line.strip().split(',')", "def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n 
result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def normalize(self, text, cleaned=False, **kwargs):\n if not cleaned:\n text = self.clean(text, **kwargs)\n return ensure_list(text)", "def normalize_text_sentences(text,pad_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~',remove_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n normalize_text_list=[]\n for sent in list(sent_tokenize(text)):\n normalize_text_list.append(normalize_text(sent,pad_punc=pad_punc,remove_punc=remove_punc))\n return normalize_text_list", "def normalize(s):\n # Remove leading/trailing whitespace\n s = s.strip()\n # Remove trailing comma\n if s.endswith(\",\"):\n s = s[:-1]\n\n return s", "def tags_from_csv_field(tag_string):\n split_string = tag_string.split()\n out_list = []\n for tag in split_string:\n out_list.append(clean_tag(tag))\n\n return out_list", "def recovTextBetweenTags(texts: str, separator: str): \n text_clean = []\n lisI = []\n lisS = []\n\n for i in range(0, len(texts)):\n if str(texts[i]) == \"<\":\n lisI.append(i)\n if texts[i] == '>':\n lisS.append(i)\n\n len_lis = len(lisI)\n for h in range(0, len_lis):\n if h < (len_lis-1):\n text_clean.append(texts[lisS[h]:lisI[h+1]])\n\n if separator != 'non':\n description = str(text_clean).replace('>', '').replace(\n ',', '').replace('\\'', '').replace(',', '')\n description = description.split(separator)\n else:\n description = text_clean\n\n return description", "def __cleaning_split(self, text: str) -> list:\n splitted_word = self.__clean_text(text)\n splitted_word = splitted_word.split()\n return splitted_word", "def set_tags(self, tags):\n self.tags = []\n for tag in [t.strip() for t in tags.split(', ')]:\n self.tags.append(Tag(title=tag))", "def tokenize_helper(self, corpus_sentences):\n tokenized_corpus = []\n for sentence in corpus_sentences:\n tokens = sentence.split()\n tokenized_sentence = []\n for token in tokens:\n if token.isspace() is False:\n tokenized_sentence.append(token)\n tokenized_corpus.append(tokenized_sentence)\n\n return tokenized_corpus", "def split_by_comma_and_whitespace(s):\r\n return re.split(r'[\\s,]+', s)", "def split(text, delim=','):\n return [x.strip() for x in text.split(delim)]", "def skill_cleaner(data):\n skills_cleaned = []\n data = data.split(',')\n for element in data:\n element = element.title()\n element = element.strip()\n element = element.replace('Agile Methodologies', 'Agile')\n element = element.replace('Agile Project Management', 'Agile')\n element = element.replace('Algorithm Design', 'Algorithms')\n element = element.replace('Algorithm Analysis', 'Algorithms')\n element = element.replace('Analytical Skills', 'Analytics')\n element = element.replace('Applied Mathematics', 'Mathematics')\n element = element.replace('Business Analytics', 'Business Analysis')\n element = element.replace('Data Analytics', 'Data Analysis')\n element = element.replace('Programming Languages', 'Programming')\n element = element.replace('Big Data Analytics', 'Big Data')\n element = element.replace('HTML5', 'HTML')\n element = element.replace('Microsoft Excel', 'Excel')\n element = element.replace('Java Programming', 'Java')\n element = element.replace('MySQL', 'SQL')\n element = element.replace('Sql', 'SQL')\n element = element.replace('Optimizations', 'Optimization')\n element = element.replace('Spark', 'Apache Spark')\n element = element.replace('Sas', 'SAS')\n element = element.replace('Latex', 'LaTeX')\n skills_cleaned.append(element)\n return skills_cleaned", "def true_tags 
(tagged_sentences):\n tags = []\n for sent in tagged_sentences:\n tags.extend([re.split('(?<!\\\\\\)\\/', word)[1] for word in sent.split()])\n return tags", "def split_by_comma(s):\n return s.strip().split(\",\")", "def prepare_data(self, data):\n # Break string into a list of sentances\n in_sentances = tokenize.sent_tokenize(data)\n out_sentances = list()\n for sentance in in_sentances:\n # Turn each word in sentance into its lemma\n lemmas = [self.lemmatizer.lemmatize(word) for word in sentance.split(\" \")]\n # Filters out all words that fail the is_valid_lemma function\n lemmas = [lemma for lemma in lemmas if self.is_valid_lemma(lemma)]\n # Joins words back together and add to list\n sentance = ' '.join(lemmas)\n out_sentances.append(sentance)\n return out_sentances", "def space_detokenizer(batch: List[List[str]]) -> List[str]:\n return [\" \".join(tokens) for tokens in batch]", "def getTagList(tags):\n tags = tags[1:len(tags)-1]\n return tags.split('><')", "def tokenize(self, tags):\n tokenized_tags = []\n for tag in tags:\n tokenized_tags.extend(nltk.word_tokenize(tag))\n return tokenized_tags", "def clean_article(self):\n # split into tokens by white space\n tokens = self.text.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def separate_comma(s):\n return s.split(',')", "def _parse_tags(self):\n tokens = self.tags_str[1:].split(\";\")\n self._tags = {\n k.strip(): v\n for token in tokens\n for k, v in [token.split(\"=\")]\n }", "def parse(text):\n # Make sure that there's text to be split\n if text == None:\n return text\n return text.split(',')", "def split_sentence(self,sentence):\n sents = self.sentence_splitter.tokenize(sentence) \n return sents", "def strip_training_tags(self, sentence=None, sep=None):\n if sentence is None:\n sentence = self.hand_tagged\n if sep is None:\n sep = self.sep\n return [w.split(sep, 1)[0] for w in sentence]", "def _prep(self, text):\n # Removing punctuation marks.\n stripped = sub(r'[^\\w\\s]', '', str(text))\n\n # Splitting the string into a list (based on spaces).\n split = stripped.split()\n\n return split", "def normalize_text(text: str) -> List[str]:\n normalized_text = []\n\n for word in text.split():\n preprocessed_word = TextProcessor.remove_punctuation(word)\n if preprocessed_word:\n normalized_text.append(preprocessed_word.lower())\n\n return normalized_text", "def normalize_tags(tags):\n ret = []\n dupes = NormalizedDict({'': 1})\n for tag in tags:\n if not dupes.has_key(tag):\n ret.append(tag)\n dupes[tag] = 1\n ret.sort(lambda x, y: cmp(normalize(x), normalize(y)))\n return ret", "def tokenize_tag(text):\n return [tok for tok in single_tokenizer(text)]", "def _preprocess(self, tagged: List[Tuple]) -> str:\n ori = \" \".join([tag[0] for tag in tagged if tag[1] != \"SPACE\"])\n sent = \" \".join([tag[1] for tag in tagged if tag[1] != \"SPACE\"])\n sent = sent.replace(\"-LRB-\", \"(\")\n sent = sent.replace(\"-RRB-\", \")\")\n return sent, ori", "def sentencesplit(doc):\n out = 
doc\n out = out.replace(\"? \", \"?.\")\n out = out.replace(\"! \", \"!.\")\n out = out.split(\".\")\n i = 0\n while \"\" in out or \" \" in out:\n if out[i] == \"\" or out[i] == \" \":\n out.pop(i)\n continue\n i += 1\n return out", "def split_values(self, value):\n if value:\n return [s.strip() for s in value.split(',')]\n else:\n return []", "def test_normalizer_space_separation():\n assert TextNormalizer().transform([[\"b c\"]])[\"corpus\"][0] == [\"b\", \"c\"]", "def tokenizer(sentence):\n words = []\n for phrase in sentence.split('.'):\n for piece in phrase.split(','):\n for word in piece.split(' '):\n words.append(word)\n return words", "def tags(string):\n\treturn set([x.strip() for x in string.split(',')])", "def clean_html(self):\n self.cleaned_html = self.html.strip()\n for begin_splitter in self.begin_splitters:\n self.cleaned_html = self.cleaned_html.split(begin_splitter)[-1]\n for end_splitter in self.end_splitters:\n self.cleaned_html = self.cleaned_html.split(end_splitter)[0]\n self.cleaned_html = self.cleaned_html.strip()\n return self.cleaned_html", "def _normalize_and_tokenize_text(text: str, stemmer: Optional[Any]=None, normalizer: Callable[[str], str]=None, tokenizer: Callable[[str], Sequence[str]]=None) ->Sequence[str]:\n text = normalizer(text) if callable(normalizer) else re.sub('[^a-z0-9]+', ' ', text.lower())\n tokens = tokenizer(text) if callable(tokenizer) else re.split('\\\\s+', text)\n if stemmer:\n tokens = [(stemmer.stem(x) if len(x) > 3 else x) for x in tokens]\n tokens = [x for x in tokens if isinstance(x, str) and len(x) > 0]\n return tokens", "def split_tag(elem, tags):\n splited_tag = elem.split(TAG_SEP)\n if len(splited_tag) > 1:\n tag_prefix, tag = splited_tag\n assert tag in tags.tags\n assert tag_prefix in tags.iob\n else:\n tag = elem\n tag_prefix = None\n assert tag == tags.default\n return tag_prefix, tag", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def clean_sentences(sentences_raw):\n out = []\n for sentence in sentences_raw:\n if sentence.split() != []:\n out.append(sentence)\n return out", "def trim_entity_spans(data: list) -> list:\n invalid_span_tokens = re.compile(r'\\s')\n\n cleaned_data = []\n for text, annotations in data:\n entities = annotations['entities']\n valid_entities = []\n for start, end, label in entities:\n valid_start = start\n valid_end = end\n while valid_start < len(text) and invalid_span_tokens.match(\n text[valid_start]):\n valid_start += 1\n while valid_end > 1 and invalid_span_tokens.match(\n text[valid_end - 1]):\n valid_end -= 1\n valid_entities.append([valid_start, valid_end, label])\n cleaned_data.append([text, {'entities': valid_entities}])\n\n return cleaned_data", "def trim_entity_spans(data: list) -> list:\n invalid_span_tokens = re.compile(r'\\s')\n\n cleaned_data = []\n for text, annotations in data:\n entities = annotations['entities']\n valid_entities = []\n for start, end, label in entities:\n valid_start = start\n valid_end = end\n while valid_start < len(text) and invalid_span_tokens.match(\n text[valid_start]):\n valid_start += 1\n while valid_end 
> 1 and invalid_span_tokens.match(\n text[valid_end - 1]):\n valid_end -= 1\n valid_entities.append([valid_start, valid_end, label])\n cleaned_data.append([text, {'entities': valid_entities}])\n\n return cleaned_data", "def make_split_data(read_data):\n split_data = re.split('[,|\\.|\\-|\\*|\\[|\\]|\\#|\\:|\\;|(\\|)|\\\"|\\'|!|\\s]+',read_data)\n\n if split_data[-1] == '':\n del split_data[-1]\n\n return split_data", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def tokenize_tag(tag):\n sentences = nltk.sent_tokenize(tag.text)\n sentence_words = []\n for sentence in sentences:\n words = nltk.casual_tokenize(sentence)\n lower_words = [w.lower() for w in words]\n filtered_words = [w for w in lower_words if w not in stop_words and not w.isdigit() and len(w) > 2]\n sentence_words += filtered_words\n return sentence_words", "def nodes_to_nags(self, nags):\n out = []\n for n in nags:\n out.append(n.text.strip(' '))\n return out", "def split_to_sentences(data):\r\n sentences = data.split(\"\\n\")\r\n \r\n sentences = [s.strip() for s in sentences]\r\n sentences = [s for s in sentences if len(s) > 0]\r\n \r\n return sentences", "def untag(tagged_sentence):\n return [w for w, _ in tagged_sentence]", "def _fix_treetags(self, tree):\n for element in tree:\n element.tag = element.tag.split('}')[1]\n if len(element.getchildren()) > 0:\n self._fix_treetags(element)\n return tree", "def sanitize(text):\n \n # Convert text to lowercase\n text = text.lower()\n\n # Replace all whitespace with a single space\n text = re.sub(r'\\s+',' ',text)\n\n # Remove all links (e.g. [abc](xyz)def --> [abc]def)\n text = re.sub(r'(\\[.*\\])(\\(.*\\))', r'\\1', text)\n\n # Remove URLs\n text = re.sub(r'((http[s]?://)?www.\\S+)|(http[s]?://\\S+)', '', text) \n\n # Split text on single spaces\n words = text.split()\n \n # Separate external punctuation then remove non-ending and non-embedded punctuation\n tokens = []\n for word in words:\n \tseparate_tokens(word, tokens)\n \n parsed_text = \"\"\n unigrams = \"\"\n bigrams = \"\"\n trigrams = \"\"\n \n # Populate lists to return\n for index, token in enumerate(tokens):\n \tparsed_text += token + ' '\n \tif token not in common:\n \t\tunigrams += token + ' '\n \t\tif index + 1 <= len(tokens)-1 and tokens[index+1] not in common:\n \t\t\tbigram = token + '_' + tokens[index+1]\n \t\t\tbigrams += bigram + ' '\n \t\t\tif index + 2 <= len(tokens)-1 and tokens[index+2] not in common:\n \t\t\t\ttrigrams += bigram + '_' + tokens[index+2] + ' '\n \n return parsed_text.strip().split() + unigrams.strip().split() + bigrams.strip().split()+ trigrams.strip().split()", "def test_tokenize_strip(self):\n input = \"((' <this> \\\"\\\" 'text' has (lots) of (special chars} >>]\"\n output = [(\"<this>\", 4), (\"text\", 15), (\"has\", 21), (\"lots\", 26), (\"of\", 32),\n (\"special\", 36), (\"chars}\", 44), (\">>\", 51)]\n self.assertEqual(output, [i for i in basic_tokenize(input)])\n for (itmO, itmV) in zip(output, basic_tokenize(input)):\n self.assertEqual(itmO, itmV)", "def parse_html_tag(sentences, html_tag_file):\n tag_list = file2tuple_list(html_tag_file, \",\")\n for key, value in tag_list:\n for index in range(len(sentences)):\n try:\n sentences[index] = re.sub(key, value, sentences[index])\n except Exception, e:\n logging.error(\"Sentence with regular expression substitution error: %s\" 
%sentences[index])\n logging.error(e.message)\n exit(-1)\n return sentences", "def untag(tagged_sentence):\n return [w for (w, t) in tagged_sentence]", "def sentences(self):\n return re.compile(r'[.!?;:,\\t\\(\\)\\\"\\']|\\s-\\s').split(self.text)", "def strip_tags(self, html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()", "def bs_preprocess(html):\n pat = re.compile('(^[\\s]+)|([\\s]+$)', re.MULTILINE)\n html = re.sub(pat, '', html) # remove leading and trailing whitespaces\n html = re.sub('\\n', ' ', html) # convert newlines to spaces\n # this preserves newline delimiters\n html = re.sub('[\\s]+<', '<', html) # remove whitespaces before opening tags\n html = re.sub('>[\\s]+', '>', html) # remove whitespaces after closing tags\n return html", "def split_tag(chunk_tag):\n if chunk_tag == 'O':\n return ('O', None)\n return chunk_tag.split('-', maxsplit=1)", "def split_strip(input, delimiter=u','):\r\n if not input:\r\n return []\r\n\r\n words = [w.strip() for w in input.split(delimiter)]\r\n return [w for w in words if w]", "def preprocess(text):\n text = text.translate(None, string.punctuation)\n words = filter(None, re.split('\\s+', text))\n words = nltk.pos_tag(words)\n words = [(word.lower(), nltk.simplify_wsj_tag(tag)) for word, tag in words]\n words = [(word, 'V') if tag.startswith('V') else (word, tag)\n for word, tag in words]\n return words", "def split_and_trim(self):\n indeces_grouped = []\n rejected_variants = []\n groups = self._get_subgroups()\n variant_groups = []\n for group in groups:\n variant_list = []\n for i in group:\n indeces_grouped.append(i)\n variant_list.append(self.variant_list[i])\n var_id = '{}_{}'.format(variant_list[0].CHROM, min([var.start for var in variant_list]))\n variant_groups.append(VariantGroup(var_id, variant_list))\n for v, variant in enumerate(self.variant_list):\n if v not in indeces_grouped:\n rejected_variants.append(variant)\n return variant_groups, rejected_variants", "def _parse_emails(self, emails):\n return [e.strip() for e in emails.split(',')]", "def normalize(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n# def remove_punc(text):\n# exclude = set(string.punctuation)\n# return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(lower(s)))", "def _batch_tokenize(self, text: List[str]) -> List[List[str]]:\n return self.bert_model.batch_tokenize([t.strip() for t in text])", "def pos_tag(self,sentence):\n tagged = self.brill_tagger.tag(sentence.split())\n tagged_sentence = \" \".join([nltk.tag.tuple2str(tok) for tok in tagged])\n print tagged_sentence\n\n tag_list = [(each.split(\"/\")[0],each.split(\"/\")[1]) for each in tagged_sentence.split()]\n return tag_list", "def parse_tags(tagstring):\n if not tagstring:\n return []\n\n tagstring = force_str(tagstring)\n\n words = []\n buffer = []\n # Defer splitting of non-quoted sections until we know if there are\n # any unquoted commas.\n to_be_split = []\n i = iter(tagstring)\n try:\n while True:\n c = six.next(i)\n if c == '\"':\n if buffer:\n to_be_split.append(''.join(buffer))\n buffer = []\n c = six.next(i)\n while c != '\"':\n buffer.append(c)\n c = six.next(i)\n if buffer:\n word = ''.join(buffer).strip()\n if word:\n words.append(word)\n buffer = []\n else:\n buffer.append(c)\n except StopIteration:\n # If we were parsing an open quote which was never closed treat\n # the buffer as unquoted.\n if 
buffer:\n to_be_split.append(''.join(buffer))\n if to_be_split:\n for chunk in to_be_split:\n words.extend(split_strip(chunk, settings.TAGGIT_SELECTIZE['DELIMITER']))\n words = list(set(words))\n words.sort()\n return words", "def parse_tags(s: str) -> List[str]:\n tags = []\n buf = []\n in_quoted = None\n\n for c in s:\n if in_quoted:\n if c == in_quoted:\n in_quoted = None\n else:\n buf.append(c)\n elif c == '\"' or c == '\\'':\n in_quoted = c\n elif c == ',':\n if buf:\n tag = ''.join(buf).strip()\n if tag:\n tags.append(tag)\n buf.clear()\n else:\n buf.append(c)\n\n if buf:\n tag = ''.join(buf).strip()\n if tag:\n tags.append(tag)\n\n return tags", "def splitTag(my_tag):\n my_split = re.findall(r'(\\d+)(\\D+)', my_tag)\n return ((int(x[0]), x[1]) for x in my_split)", "def extract(elements) -> List[str]:\n return [element.text_content().translate(str.maketrans('', '', string.whitespace)) for element in elements]", "def clean(self):\n # Calls handle_starttag, handle_endtag, and handle_data\n self.feed()\n\n # Clean up any parent tags left open\n if self.current_parent_element['tag'] != '':\n self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])\n\n # Remove empty <p> added after lists\n self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\\g<1>', self.cleaned_html)\n\n self._remove_pre_formatting()\n\n return self.cleaned_html", "def simple_tokeniser(sent):\n sent = re_tok_apos.sub(r\"\\1 's\", sent)\n sent = re_tok_mw_punc.sub(r\"\\1 \\2\", sent)\n sent = re_tok_punc.sub(r\" \\1 \", sent).replace('-', ' - ')\n sent = re_tok_punc.sub(r\" \\1 \", sent)\n sent = re_tok_mult_space.sub(' ', sent)\n return sent.lower().split()", "def clean_chunk(self, chunk):\n if chunk.text.lower() in self.stopwords:\n return []\n while len(chunk) > 1:\n start_token = chunk[0]\n if start_token.text.lower() in self.stopwords or start_token.text.isdigit() or start_token.tag_ == 'PRP':\n chunk = chunk[1:]\n else:\n break\n if len(chunk) == 1:\n start_token = chunk[0]\n if start_token.text.lower() in self.stopwords or start_token.text.isdigit() or start_token.tag_ == 'PRP':\n return []\n if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9\\' -]*[a-zA-Z0-9]$', chunk.text):\n return []\n return chunk", "def join_tags(tags):\n names = []\n delimiter = settings.TAGGIT_SELECTIZE['DELIMITER']\n for tag in tags:\n name = tag.name\n if delimiter in name or ' ' in name:\n names.append('\"%s\"' % name)\n else:\n names.append(name)\n return delimiter.join(sorted(names))", "def remove_Tags(self,text):\n cleaned_text = re.sub('<[^<]+?>', '', text)", "def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())", "def fixtags(self, text):\n # french spaces, last one Guillemet-left\n # only if there is something before the space\n text = _guillemetLeftPat.sub(ur'\\1&nbsp;\\2', text)\n # french spaces, Guillemet-right\n text = _guillemetRightPat.sub(ur'\\1&nbsp;', text)\n return text", "def normalize(\n self,\n text: str,\n n_tagged: int,\n punct_pre_process: bool = True,\n punct_post_process: bool = True,\n verbose: bool = False,\n ) -> str:\n if punct_pre_process:\n text = pre_process(text)\n text = text.strip()\n if not text:\n if verbose:\n print(text)\n return text\n\n text = pynini.escape(text)\n if n_tagged == -1:\n tagged_texts = rewrite.rewrites(text, self.tagger.fst)\n else:\n tagged_texts = rewrite.top_rewrites(text, self.tagger.fst, nshortest=n_tagged)\n\n if self.lang == 'en':\n normalized_texts = tagged_texts\n else:\n normalized_texts = []\n for tagged_text in 
tagged_texts:\n self._verbalize(tagged_text, normalized_texts)\n\n if len(normalized_texts) == 0:\n raise ValueError()\n if punct_post_process:\n normalized_texts = [post_process_punctuation(t) for t in normalized_texts]\n normalized_texts = set(normalized_texts)\n return normalized_texts", "def process_token_sentence(text):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n return sentences", "def clean_text(txt):\n s = ''\n for c in txt:\n if c != '.' and c != ',' and c != '!' and c != '?':\n s += c\n s = s.lower().split()\n return s", "def _add_tags(self):\n\n if self.version != 'live':\n return\n\n tags = [t.strip() for t in self.tags_text.split(',')]\n tags = list(set(tags))\n\n for tag_name in tags:\n tag_slug = slugify(tag_name)\n if tag_slug:\n try:\n tag = Tag.objects.get(blog=self.blog, slug=tag_slug)\n except Tag.DoesNotExist:\n tag = Tag( blog = self.blog,\n name = tag_name,\n slug = tag_slug)\n\n tag.increment()\n tag.save()\n\n self.tags.add(tag)" ]
[ "0.68961847", "0.6774083", "0.67034566", "0.65602213", "0.63447934", "0.62999445", "0.6269208", "0.62582284", "0.6224067", "0.61772186", "0.6168551", "0.6133678", "0.6001191", "0.5954021", "0.592172", "0.59052056", "0.5873035", "0.5848221", "0.5820034", "0.58127075", "0.5751213", "0.57387125", "0.5733556", "0.5680724", "0.5657412", "0.5638284", "0.56152636", "0.56104064", "0.55541825", "0.5521223", "0.5499318", "0.5477375", "0.5475156", "0.5452756", "0.54207903", "0.54125434", "0.5404233", "0.53740555", "0.5372722", "0.53602487", "0.5354123", "0.5330501", "0.5320657", "0.5309298", "0.5307317", "0.52976453", "0.5283792", "0.5271947", "0.52585155", "0.5257034", "0.52449864", "0.52365243", "0.52295494", "0.5221805", "0.52212644", "0.52124774", "0.52051324", "0.52023774", "0.52011746", "0.5173336", "0.51714325", "0.5167269", "0.5167269", "0.5166686", "0.5160257", "0.5159356", "0.51545835", "0.5151512", "0.5145572", "0.51300234", "0.5128472", "0.5127621", "0.5123718", "0.512321", "0.51156104", "0.511531", "0.5107379", "0.5106414", "0.5096462", "0.50961494", "0.50691754", "0.50610644", "0.50565875", "0.5054701", "0.505451", "0.50530803", "0.5050739", "0.5050011", "0.5048057", "0.5046383", "0.50441617", "0.5036436", "0.5029673", "0.5029672", "0.50229084", "0.50197977", "0.5015302", "0.501489", "0.50109464", "0.5010457" ]
0.6720322
2
Clean a sender address by using a predefined map
def clean_sender(sender): if sender in _sender_map: return _sender_map[sender] return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_recipient(recipient):\n if recipient in _recipient_map:\n return _recipient_map[recipient]\n return ''", "def _cleanupAddress(self, address):\n clean = []\n \n # This is sort of a desultory effort but I'm not convinced \n # that these cleanups will actually result in cleaner searches\n for word in address.split(None):\n lower = word.lower()\n \n # Some things we just nuke\n if lower == 'at': continue\n elif lower == 'btw': continue\n elif lower == 'btwn': continue\n elif word.isdigit(): continue\n \n # Or we make substitiutions\n elif lower == 'st' or lower == 'st.':\n word = 'Street'\n elif lower == 'ave' or lower == 'ave.':\n word = 'Avenue'\n elif lower == 'pl' or lower == 'pl.':\n word = 'Place'\n elif lower == 'n': word = 'North'\n elif lower == 'e': word = 'East'\n elif lower == 's': word = 'South'\n elif lower == 'w': word = 'West'\n \n clean.append(word)\n return ' '.join(clean)", "def fixAddress(data):\n\tfor each in data:\n\t\ttempK = list()\n\t\tfor tag in each['k']:\n\t\t\tif tag.split(':')[0] == 'addr':\n\t\t\t\ttempK.append(tag.split(':')[1])\n\t\t\telse:\n\t\t\t\ttempK.append(tag)\n\t\t\teach['k'] = deepcopy(tempK)\n\t\tyield each", "def removeAll(self, addr: ghidra.program.model.address.Address) -> None:\n ...", "def clear_address(self): #DONE\n for component_name in self.__keys:\n self.address[component_name] = Component(component_name, '')", "def process_address(text):\n return sanitize(text[9:])", "def async_clear_address(self, address: str) -> None:\n self._matched.pop(address, None)\n self._matched_connectable.pop(address, None)", "def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )", "def normalize_address(self, address, domain):\n if address is not None and not self.address_regex.match(address):\n if domain is not None:\n address = \"{address}@{domain}\".format(address=address, domain=domain)\n else:\n address = None\n\n return address", "def eliminate(values):\n for box, val in values.items():\n if len(val) == 1:\n for peer in peers[box]:\n values[peer] = values[peer].replace(val, '')", "def eliminate(values):\r\n\r\n ''' Your solution here '''\r\n for key, value in values.items():\r\n if (len(value) == 1):\r\n for key_peer in peers[key]:\r\n values[key_peer] = values[key_peer].replace(value, '')\r\n return values", "def clean_receiver(self):\n data = self.cleaned_data[\"receiver\"]\n if not is_valid_address(data):\n raise ValidationError(\"Provided value is not a valid Algorand address!\")\n return data", "def normalize_address(address: str):\n return Web3.toChecksumAddress(address.lower())", "def func_from(self, data, get_recv):\n if get_recv:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode()\n else:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[0]\n data_list = checking.split(':')\n 
remove_bracket = str(data_list[1])\n remove_bracket = remove_bracket[2:-1]\n data_list[1] = remove_bracket\n check = data_list[0].lower().rstrip()\n if check == 'mail from':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n return True", "def keepAddresses(networkItems_):\n for i in networkItems_[:]:\n try:\n ip = netaddr.IPAddress(i)\n except:\n networkItems_.remove(i)\n return networkItems_", "def extract_addresses(addresses):\n \n # Since lists are iterated over in Python in an orderly fashion, \n # put 'Input Reg' before 'Input' such that the 'Reg' in an address \n # with an 'Input Reg' prefix doesn't get left behind\n address_prefixes= [\"Input Reg \", \"Holding Reg \", \"Input \", \"Coil \"]\n\n \n for idx,address in enumerate(addresses):\n # Replace prefixes with empty string\n for prefix in address_prefixes:\n addresses[idx]=addresses[idx].replace(prefix, \"\")\n # Extract numeral\n try:\n addresses[idx]= int(addresses[idx])\n except:\n logging.warning(\"Invalid modbus address suppied at index {}\".format(idx))\n\n # Return\n return addresses", "def sanitize_message(message, pseudo, to_address, contact):\n replacements = [\n (re.compile(pattern, re.IGNORECASE), replacement)\n for pattern, replacement in (\n # Remove user's real email address\n (re.escape(pseudo.member.user.email()), pseudo.email),\n \n # Remove contact email if message quoted in reply\n (re.escape(to_address.name[1:-1]), ''),\n (re.escape(to_address.email), contact.email),\n \n # Remove 'add to not spam list' link from replies\n ('\\<a class=\\\"%s\\\".+\\</a\\>\\n?' % LINK_REMOVE_CLASS, ''),\n )\n ]\n\n for content_type in ('body', 'html'):\n body = getattr(message, content_type).decode()\n\n for pattern, replacement in replacements:\n body = pattern.sub(replacement, body)\n\n logging.debug(body)\n\n setattr(message, content_type, body.encode())\n \n # Remove spam label from subject\n if dontspamme.config.spam_label + ' ' in message.subject:\n message.subject = message.subject.replace(dontspamme.config.spam_label + ' ', '', 1)", "def receiverMapping():", "def _remove_node_address_from_params(cls, params: dict):\n if ConstantKeys.NODE_ADDRESS in params:\n del params[ConstantKeys.NODE_ADDRESS]", "def eliminate(values):\n # TODO: Copy your code from the classroom to complete this function\n for box,value in values.items():\n #print (box,value)\n if len(values[box]) == 1:\n for peer in peers[box]:\n if value in values[peer]:\n values[peer] = values[peer].replace(value,'')\n return values", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def strip_symbol_from_msgs(oChecker):\n\n dNewMsgs = {}\n for sKey, tData in oChecker.msgs.items():\n dNewMsgs[sKey] = (tData[0], tData[2])\n # Monkey patch the checker\n oChecker.msgs = dNewMsgs", "def forget_unicast_address(self):\n self.send_packet('\\xb3')", "def _check_address(self):\n for object_ in self.objects:\n if object_.object_name.endswith(' ЕС'):\n if object_.object_address[:6].isnumeric():\n object_.object_address = \\\n object_.object_address[:7] + \\\n object_.object_fed_subj + ', ' + \\\n object_.object_address[7:]", "def normalise_address(address):\n return re.sub('\\s+', ' ', str(address).upper()).replace(' ,', ',')", "def erase_address(self):\n self.remove_pointer_or_layer()\n if self.geocoder_source_model is not None:\n self.geocoder_source_model.clear()\n if self.dlg.geocoder_search is not None:\n 
self.dlg.geocoder_search.clear()\n if self.toolbar_search is not None:\n self.toolbar_search.clear()\n self.toolbar_search.setCompleter(None)", "def clear_address_component(self, component_name):\n if component_name in self.__keys:\n self.address[component_name] = Component(component_name, '')", "def normalize_address(addr: str) -> str:\n # bitcoin hrps\n hrps = {net[\"bech32\"] + \"1\" for net in NETWORKS.values()}\n # liquid hrps\n # Blech32 addresses are intended for confidential assets\n hrps = hrps.union(\n {net[\"blech32\"] + \"1\" for net in NETWORKS.values() if \"blech32\" in net}\n )\n if addr.lower().startswith(tuple(hrps)):\n return addr.lower()\n return addr", "def remove_ats(self):\n\t\tfor key in self.keys():\n\t\t\tif key[:1] == '@':\n\t\t\t\ttry: del self[key]\n\t\t\t\texcept: pass", "def do_replace_addr(addr):\n do_replace = True\n\n # NOTE: This list should stay in sync with wlan_exp.util mac_addr_desc_map\n\n # Don't replace the broadcast address (FF-FF-FF-FF-FF-FF)\n if(addr == (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)):\n do_replace = False\n\n # Don't replace multicast IP v4 addresses (01-00-5E-xx-xx-xx)\n # http://technet.microsoft.com/en-us/library/cc957928.aspx\n if(addr[0:3] == (0x01, 0x00, 0x5E) and (addr[4] <= 0x7F)):\n do_replace = False\n\n # Don't replace multicast IP v6 addresses (33-33-xx-xx-xx-xx)\n # http://www.cavebear.com/archive/cavebear/Ethernet/multicast.html\n if(addr[0:2] == (0x33, 0x33)):\n do_replace = False\n\n # Don't replace Mango addresses (40-D8-55-04-2x-xx)\n if(addr[0:4] == (0x40, 0xD8, 0x55, 0x04) and ((addr[4] & 0x20) == 0x20)):\n do_replace = False\n\n return do_replace", "def clean(self, addr, keep=False):\n if addr in six.iterkeys(self.addr_to_conn_struct_map):\n # close socket\n self.addr_to_conn_struct_map[addr].conn.close()\n # stop thread\n self.addr_to_conn_struct_map[addr].thread.stop()\n # remove struct from server map\n if not keep: # TODO: total HACK : IF clean is called within a loop, and don't want to modify size of map?\n del self.addr_to_conn_struct_map[addr]", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n # values[peer] = values[peer].replace(digit, '')\n new_value = values[peer].replace(digit, '')\n assign_value(values, peer, new_value)\n return values", "def clear_recipients(self):\n self._to = []\n self._cc = []\n self._bcc = []", "def test_clean_email(self):\n\n raw_email = 'from=<user@domain.com>'\n result = clean_email(raw_email)\n self.assertEqual(result, 'user@domain.com')", "def tidy_address(address):\n address = address.lstrip('$,')\n address = address.rstrip('$,')\n address = re.sub(r'\\$US$', '', address)\n return address", "def clear_mappings(g, source):\n\n if \"m\" in g.node[source]:\n del g.node[source][\"m\"]\n\n for n in g.neighbors_iter(source):\n if \"m\" in g.node[n]:\n del g.node[n][\"m\"]", "def _strip_email(email_address):\n return re.sub(\"(?:\\.|\\+.*)(?=.*?@)\", \"\", email_address)", "def eliminate(values):\n\tsolved = [box for box in boxes if len(values[box]) == 1]\n\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\tfor empty in empties:\n\t\tvalues[empty] = '123456789'\n\n\tfor box in solved:\n\n\t\tfor peer in peers[box]:\n\t\t\tvalues = assign_value(values, peer, values[peer].replace(values[box], ''))\n\n\treturn values", "def eliminate(values):\n complete_boxes = [box for box in values.keys() if len(values[box])==1]\n for box in complete_boxes:\n for 
peer in peers[box]:\n values = assign_value(values, peer, values[peer].replace(values[box], \"\"))\n \n return values", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def keepHostNames(networkItems_):\n for i in networkItems_[:]:\n try:\n ip = netaddr.IPAddress(i)\n networkItems_.remove(i)\n except:\n pass\n return networkItems_", "def coerce_address(address: Address | str) -> Address:\n if isinstance(address, str):\n header = SMTP.header_factory('sender', address)\n assert isinstance(header, SingleAddressHeader)\n return header.address\n\n assert isinstance(address, Address)\n return address", "def tidy_telephone(telephone):\n junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']\n telephone = telephone.replace('xxx-xxx-xxxx', '')\n telephone = telephone.replace('ext', ' x')\n telephone = telephone.replace(' cell', '')\n telephone = telephone.replace('\"', '')\n telephone = telephone.replace('%', '')\n if telephone in junk:\n return ''\n else:\n return telephone", "def normalize_address(address):\n # Fix 'Place/Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]/[a-zA-Z0-9]', address):\n address = address.replace('/', ' & ')\n # Fix 'Place:Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]:[a-zA-Z0-9]', address):\n address = address.replace(':', ' & ')\n # Fix 'RD' -> 'Rd' & 'PK' -> 'Pk'\n if re.findall(r'[PRSA][KDTV]', address):\n address = re.sub(r'([PRSA][KDTV])', \\\n lambda x: x.group(0).title(), address)\n # Fix 'Bl' -> 'Blvd'\n if re.findall(r'(Bl)[\\ ]', address):\n address = address.replace('Bl', 'Blvd')\n # Fix 'w 156th' -> 'W 156th'\n if re.findall(r'[^a-zA-Z][wnse][/ ]', address):\n address = re.sub(r'[^a-zA-Z]([wnse])[/ ]', \\\n lambda x: x.group(0).upper(), address)\n # Fix '151 St' -> '151st St'\n if re.findall(r'[0-9][\\ ][SA][tv]', address):\n address = re.sub(r'[0-9]+', \\\n ordinal_conversion, address)\n return address", "def sanitize(self, line):\n self.line_count = self.line_count + 1\n components = line.split(\",\")\n for comp in components:\n # if any field has IP in it, see if the value matches something in the dict\n items = comp.split(\"=\")\n if \"ip\" in items[0]:\n if items[1] in self._insts:\n # found this dirty IP in our learned dictionary, replace it\n dirty_ip = items[1]\n clean_ip = self._insts[dirty_ip]\n line = re.sub(dirty_ip, clean_ip, line, 1)\n\n # if this message has a pdu, clean up the pdu too\n msg_type = self._extract_by_key(line, \"type\")\n if \"sflow\" in msg_type or \"event\" in msg_type:\n pdu = self._extract_by_key(line, \"pdu\")\n # substitute the converted IP based on type\n if \".\" in dirty_ip:\n # v4\n line = re.sub(self._v4_string_to_hex(dirty_ip),\n self._v4_string_to_hex(clean_ip), line)\n\n pdu = self.fix_checksum(pdu)\n\n line = line[0: (line.find(\"pdu=\")) +4] + pdu + \",\\n\"\n else:\n # v6 - remove : and go to lower case before swap\n dirty_swap = re.sub(\":\", \"\", dirty_ip)\n dirty_swap = dirty_swap.lower()\n line = re.sub(dirty_swap, self._v4_string_to_hex(clean_ip), line)\n if (args.hexdump):\n pdu_hex = pdu\n pdu_hex = \" \".join(pdu_hex[i:i+2] for i in range(0, 
len(pdu_hex), 2)) #put timestamp and offset in front of pdu hex\n pdu_hex = pdu[0:15] + \" 000000 \" + pdu_hex + \" ,\\n\"\n hexdump_file.write(pdu_hex)\n return line", "def normalize_address(patched_address: OrderedDict[str, str]) -> location.Address:\n\n address_kwargs = {\n # \"street1\",\n # \"city\",\n # \"state\",\n # \"zip\"\n }\n street_buffer: List[str] = []\n suite_buffer: List[str] = []\n while len(patched_address) > 0:\n component, value = patched_address.popitem(last=False)\n if component == \"PlaceName\":\n address_kwargs[\"city\"] = value\n elif component == \"StateName\":\n address_kwargs[\"state\"] = value\n elif component == \"ZipCode\":\n address_kwargs[\"zip\"] = value\n elif component == \"OccupancyType\":\n suite_buffer.append(value)\n elif component == \"OccupancyIdentifier\":\n suite_buffer.append(value)\n else:\n street_buffer.append(value)\n address_kwargs[\"street1\"] = \" \".join(street_buffer)\n if len(suite_buffer) > 0:\n address_kwargs[\"street2\"] = \" \".join(suite_buffer)\n\n return location.Address(**address_kwargs)", "def sender(self, addr,name):\n self.s[name] = (addr,self.ssn.sender(addr)) \n return self.s[name]", "def test_clean_email_empty(self):\n\n raw_email = 'from=<>'\n result = clean_email(raw_email)\n self.assertEqual(result, '')", "def normalize(address):\n replacement = re.sub('\\W+', SEPARATOR, address.lower())\n\n processed = []\n for p in replacement.split(SEPARATOR):\n if not p:\n continue\n\n if p in ABBRS:\n processed.append(ABBRS[p])\n else:\n processed.append(p)\n\n processed.sort()\n\n normalized = SEPARATOR.join(processed)\n return normalized", "def decapsulate(self, other):\n s1 = self.to_bytes()\n s2 = Multiaddr(other).to_bytes()\n try:\n idx = s1.rindex(s2)\n except ValueError:\n # if multiaddr not contained, returns a copy\n return Multiaddr(self)\n return Multiaddr(s1[:idx])", "def remove(self, transport):\r\n recipients = copy.copy(self.recipients)\r\n for address, recManager in recipients.iteritems():\r\n recManager.remove(transport)\r\n if not len(recManager.transports):\r\n del self.recipients[address]", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def clean_venue(location):\n venue = None\n address = None\n locality = location['City'] if 'City' in location else None\n region = location['State.Abbreviated'] if 'State.Abbreviated' in location else None\n postal_code = None\n \n return ' '.join(['' if i is None else i for i in [venue, address, locality, region, postal_code]])", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def fix_calldata(self) -> None:\n for step in self.steps:\n step.call_data = step.call_data.replace(\n \"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef\", self.sender[2:]\n )", "def testGetAddresses3(self):\n self.shop.setMailFromAddress(\"john@doe.com\")\n \n sender = self.addresses.getSender()\n self.assertEqual(sender, \"Site Administrator <john@doe.com>\")\n \n # Just sender is set, hence receiver is same as sender\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Site Administrator <john@doe.com>\",))\n \n # Name and address is set\n self.shop.setMailFromName(\"John Doe\")\n \n sender = self.addresses.getSender()\n self.assertEqual(sender, \"John Doe <john@doe.com>\")\n\n # Just sender is set, hence receiver is same as sender\n receivers = 
self.addresses.getReceivers()\n self.assertEqual(receivers, (\"John Doe <john@doe.com>\",))\n\n # Receivers set\n self.shop.setMailTo([\"Jane Doe <jane@doe.com>\"])\n\n sender = self.addresses.getSender()\n self.assertEqual(sender, \"John Doe <john@doe.com>\")\n \n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Jane Doe <jane@doe.com>\",))\n\n # More receivers set\n self.shop.setMailTo([\"Jane Doe <jane@doe.com>\", \"baby@joe.com\"])\n\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Jane Doe <jane@doe.com>\", \"baby@joe.com\"))", "def __process_address(self, address: Tuple[int, int, int, int, int]) -> Dict[str, int]:\n return {\n 'interface': address[0],\n 'protocol': address[1],\n 'type': address[2],\n 'hardware_type': address[3],\n 'address': address[4],\n }", "def unknown_address(ip_address: str) -> dict:\n address = UNKNOWN_COUNTRY.copy()\n address.update({\"remote_addr\": ip_address})\n return address", "def _mask(self, map_):\n return None", "def _clean_address(self, field):\n data = self.cleaned_data[field]\n if data != \"\" and not is_valid_address(data):\n raise ValidationError(\"Provided value is not a valid Algorand address!\")\n return data", "def fix_transport_rxns(x):\n TRANSPORT_REGEX = r\"[\\[\\(]side *\\d+[\\]\\)]\"\n x = strip_stoich_wrapper(x)\n x = re.sub(TRANSPORT_REGEX, \"\", x, re.IGNORECASE)\n return x", "def listToAddr(location):\n\n start_time = time.time()\n wk = [key for key in location.keys() if key in ('street', 'house_num', 'suburb', 'city', 'province', 'country', 'pos_code')]\n address = re.sub(',', '', ', '.join(value for value in dict(zip(wk, [location[k] for k in wk])).values() if value), 1)\n print('--- Tiempo de ejecucion listToAddr: {} segundos ---'.format((time.time() - start_time)))\n return address", "def _extract_email_address(self, from_email):\n res = email.utils.parseaddr(from_email)\n if len(res[1]) != 0:\n return res[1].lower()\n else:\n print(res, from_email)\n return \"\"", "def _sanitize_ipv4_mapping(ip_str):\r\n if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):\r\n # not an ipv4 mapping\r\n return ip_str\r\n\r\n hextets = ip_str.split(':')\r\n\r\n if '.' 
in hextets[-1]:\r\n # already sanitized\r\n return ip_str\r\n\r\n ipv4_address = \"%d.%d.%d.%d\" % (\r\n int(hextets[6][0:2], 16),\r\n int(hextets[6][2:4], 16),\r\n int(hextets[7][0:2], 16),\r\n int(hextets[7][2:4], 16),\r\n )\r\n\r\n result = ':'.join(hextets[0:6])\r\n result += ':' + ipv4_address\r\n\r\n return result", "def test_drop_emails():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"test@webmail.com\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"email\"].dropna().empty", "def clean_w_map(value, mapping):\n if value in mapping.keys():\n return mapping[value]\n return value", "def clean_counselor(counselor):\n counselor = dict(counselor)\n\n if not REQUIRED_COUNSELOR_KEYS.issubset(set(counselor.keys())):\n raise ValueError('missing keys in counselor')\n\n lat_lng_keys = ('agc_ADDR_LATITUDE', 'agc_ADDR_LONGITUDE')\n for key in lat_lng_keys:\n counselor[key] = float_or_none(counselor[key])\n\n for key in ('city', 'nme'):\n counselor[key] = title_case(counselor[key])\n\n counselor['email'] = reformat_email(counselor['email'])\n counselor['weburl'] = reformat_weburl(counselor['weburl'])\n\n return counselor", "def cleaning (data):", "def clean_mentions(self, tweet):\n self.mentions = [tag.strip('@') for tag in tweet.split() if tag.startswith('@')]\n\n for mention in self.mentions:\n tweet = tweet.replace('@'+mention, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n\n return tweet", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def clean_street(self):\n street = self.cleaned_data['street'].strip().title()\n street = re.sub(r'\\bRoad\\b', 'Rd', street)\n street = re.sub(r'\\bStreet\\b', 'Str', street)\n street = re.sub(r'\\bAvenue\\b', 'Ave', street)\n street = re.sub(r'\\bParkway\\b', 'Pkwy', street)\n street = re.sub(r'\\bSuite\\b', 'Ste', street)\n street = re.sub(r'\\bApartment\\b', 'Apt', street)\n street = re.sub(r'\\s+', ' ', street) # Remove runs of spaces\n return street", "def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))", "def remove_emails(text):\n return re.sub(r'([\\w0-9._-]+@[\\w0-9._-]+\\.[\\w0-9_-]+)', ' ', text)", "def test_normalize_unknown_address(self) -> None:\n address = unknown_address()\n normalized = normalize_an_address(address)\n\n valid_address_assertions(\n test_method=self.TEST_METHOD,\n locale=\"international\",\n original_address=address,\n returned_address=normalized,\n expected_residential_indicator=None,\n )", "def remove_from_earth(sender, instance, **kwargs):\n\tgrplst = instance.groups_as_string.split(\", \")\n\tmail = instance.associated_user.email\n\t#loop over list\n\tfor grp in grplst:\n\t\trequests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members/{}\".format(grp,mail),auth=('api', settings.MAILGUN_API_KEY))", "def honeypot_unpeer(self,honeypotids):\n req = {\"type\":\"unpeer\",\n \"from\":self.network.mc_id,\n \"to\":honeypotids}\n expect_dict = {\"type\":\"unpeered\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = []\n for msg in msg_list:\n answer.append(msg[\"from\"])\n return answer", "def delete_sSMS():\n\tfor msg in client.messages.list():\n\t\ttemp = str(msg.from_)\n\t\tif(temp == base):\n\t\t\tclient.messages.delete(msg.sid)", "def sender(self, sender: Address) -> None:\n enforce(\n isinstance(sender, str), f\"Sender 
must be string. Found '{type(sender)}'\"\n )\n self._sender = sender", "def remove_emails(text: str, emails=_EMAILS_RE) -> str:\n return emails.sub('', text)", "def clean_location(df):\n \n local = df['location'].astype(str)\n \n #geocoders read X St at Y St better than X & Y or X/Y\n local = local.str.replace(\"&\", \"at\")\n local = local.str.replace(\"/\", \"at\")\n \n #OpenAddress dataset has addresses in title case\n local = local.str.title()\n\n return df.assign(location=local.values)", "def _sanitize_request_data(self, data, keys=KEYS_TO_SANITIZE):\n for key in keys:\n data.pop(key, None)", "def filter_blacklisted_recipients(addresses):\n if type(addresses) is str:\n addr = parseaddr(addresses)[1]\n if not BlacklistedEmail.objects.filter(email=addr).exists():\n return addresses\n return []\n if type(addresses) is list:\n filtered_addresses = []\n for recipient in addresses:\n addr = parseaddr(recipient)[1]\n logger.debug(f\"Address in check for blacklist: {addr}\")\n if BlacklistedEmail.objects.filter(email=addr).exists():\n continue\n filtered_addresses.append(recipient)\n return filtered_addresses", "def clean_email(self, email):\n return email", "def unlink(address):", "def _strip_map(mols):\n for m in mols:\n [a.ClearProp('molAtomMapNumber')\n for a in m.GetAtoms() if a.HasProp('molAtomMapNumber')]\n return mols", "def cleanup(self):\n all_aps_info = self.zd.get_all_ap_info()\n all_aps_ins = self.testbed.components['AP']\n for ap_ins in all_aps_ins:\n for ap_info in all_aps_info:\n if ap_ins.base_mac_addr.upper() == ap_info.get('mac').upper() and ap_info.get('ip_addr') != '':\n ap_ins.ip_addr = ap_info.get('ip_addr')", "def mail_address(mail_addr_list):\n if mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\" \", \"\")\n if \",\" in mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\",\", \";\")\n mail_addr_list = mail_addr_list.split(\";\")\n for mail_addr in mail_addr_list:\n if len(mail_addr.split(\"@\")) != 2:\n raise ArgumentTypeError(\"Invalid mail address: %s\" % mail_addr)\n return mail_addr_list\n else:\n raise ArgumentTypeError(\"mail address is not specified\")", "def convert_address(self, addr_obj):\n return addr_obj.mailbox.decode() + '@' + addr_obj.host.decode()", "def lookup(self, message) :\n message = email.message_from_string(message)\n \n # if the message is not to this project, ignore it\n trac_address = self.env.config.get('mail', 'address')\n if not trac_address:\n trac_address = self.env.config.get('notification', 'smtp_replyto')\n if not self.env.config.getbool('mail', 'accept_all') :\n to = list(email.Utils.parseaddr(message['to']))\n cc = message.get('cc','').strip()\n if cc:\n cc = [email.Utils.parseaddr(i.strip())[1] \n for i in cc.split(',') if i.strip()]\n to = to + cc\n delivered_to = message.get('delivered-to', '').strip()\n if delivered_to:\n to.append(email.Utils.parseaddr(delivered_to)[1])\n original_to = message.get('x-original-to', '').strip()\n if original_to:\n to.append(original_to)\n \n if trac_address not in to:\n raise AddressLookupException(\"Email (to : %s ) does not match Trac address: %s\" %(str(to), trac_address))\n \n return message", "def test_client_address_delete(self):\n pass", "def remove_address(intent, session):\n sess_data = session.setdefault('attributes', {})\n sess_data['remove_address'] = True\n\n # Retrieve stored data just to check if it exists or not.\n user_data = database.get_user_data(session['user']['userId'])\n if not user_data:\n return reply.build(\"I already don't remember any addresses for 
you.\",\n is_end=True)\n elif sess_data.get('awaiting_confirmation'):\n # The user has requested removal and\n # we requested confirmation\n if intent['name'] == 'AMAZON.NoIntent':\n return reply.build(\"Okay, keeping your stored addresses.\",\n is_end=True)\n elif intent['name'] == 'AMAZON.YesIntent':\n succ = database.delete_user(session['user']['userId'])\n if succ:\n return reply.build(\"Okay, I've forgotten all the addresses \"\n \"you told me.\", is_end=True)\n else:\n # Only get here if the database interaction fails somehow\n return reply.build(\"Huh. Something went wrong.\", is_end=True)\n else:\n # Shouldn't ever get here.\n return reply.build(\"Sorry, I don't know what you mean. \"\n \"Try again?\", persist=sess_data, is_end=False)\n else:\n # Prompt the user for confirmation of data removal.\n sess_data['awaiting_confirmation'] = True\n return reply.build(\"Do you really want me to forget the addresses \"\n \"you gave me?\",\n reprompt='Say \"yes\" to delete all stored addresses '\n 'or \"no\" to not change anything.',\n persist=sess_data,\n is_end=False)", "def clear_nastran(self):\n self.eid_map = {}\n self.nid_map = {}\n self.eid_to_nid_map = {}\n self.element_ids = None\n self.node_ids = None", "def _parse_from_email(self, from_email):\n if isinstance(from_email, str):\n return self._generate_email(from_email)\n elif isinstance(from_email, dict):\n return self._generate_email(**from_email)\n else:\n raise ValueError('Invalid from email adress')", "def on_scrub_geo(self, notice):\n log.debug(\"Received location deletion notice: %s\", notice)", "def clear_cep(cep: Any) -> str:\n cep = str(cep)\n new_cep = \"\"\n\n for e in cep:\n new_cep += e if e not in pontuations else \"\"\n\n if len(new_cep) != 8:\n raise TypeError\n\n cep = new_cep[:5] + \"-\" + new_cep[5:]\n\n return cep", "def clean_ipv6_address(\n ip_str, unpack_ipv4=False, error_message=_(\"This is not a valid IPv6 address.\")\n):\n try:\n addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))\n except ValueError:\n raise ValidationError(error_message, code=\"invalid\")\n\n if unpack_ipv4 and addr.ipv4_mapped:\n return str(addr.ipv4_mapped)\n elif addr.ipv4_mapped:\n return \"::ffff:%s\" % str(addr.ipv4_mapped)\n\n return str(addr)", "def broadcast(self, addr, message):\n for addr in set(six.iterkeys(self.addr_to_conn_struct_map)) - {addr}:\n try:\n self.addr_to_conn_struct_map[addr].conn.send(message)\n except:\n # if we have any error sending, close the client connection, then remove it from our list\n self.clean(addr)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def set_filter_address(self, addresses):\r\n if isinstance(addresses, basestring):\r\n addresses = [addresses]\r\n self.filter_src_addresses = addresses" ]
[ "0.6415194", "0.63102794", "0.6073433", "0.5693001", "0.5599912", "0.5535422", "0.5509846", "0.5509394", "0.5472849", "0.5472058", "0.5465454", "0.5461169", "0.5429801", "0.54245687", "0.53459334", "0.53453904", "0.53189844", "0.52752376", "0.52727574", "0.52652967", "0.5262103", "0.5233898", "0.52338517", "0.5228017", "0.5209888", "0.5204101", "0.51892436", "0.51746005", "0.51458806", "0.51393133", "0.5138814", "0.5132007", "0.5123763", "0.51209456", "0.5110345", "0.5094843", "0.5081283", "0.50742567", "0.5071741", "0.5068843", "0.5068843", "0.5056825", "0.5056453", "0.5056296", "0.50224024", "0.5018876", "0.50179166", "0.5008161", "0.50054616", "0.5002146", "0.5000677", "0.49687052", "0.4949327", "0.49482217", "0.49244067", "0.4922417", "0.49183795", "0.49160424", "0.49015912", "0.48866028", "0.4885231", "0.48850515", "0.4870547", "0.48697177", "0.48447984", "0.48445946", "0.4843519", "0.4841562", "0.4841162", "0.4837713", "0.48322195", "0.48261464", "0.48187563", "0.48175213", "0.4809718", "0.48036256", "0.47999594", "0.4796267", "0.47877607", "0.47875103", "0.47727787", "0.47664937", "0.47552925", "0.47488195", "0.47478557", "0.4737189", "0.47351956", "0.4734695", "0.47322226", "0.47312766", "0.4729371", "0.47187927", "0.47050214", "0.4704658", "0.47033793", "0.46970525", "0.46926516", "0.4691975", "0.46900156", "0.46896204" ]
0.6980281
0
Clean a recipient address by using a predefined map
def clean_recipient(recipient): if recipient in _recipient_map: return _recipient_map[recipient] return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cleanupAddress(self, address):\n clean = []\n \n # This is sort of a desultory effort but I'm not convinced \n # that these cleanups will actually result in cleaner searches\n for word in address.split(None):\n lower = word.lower()\n \n # Some things we just nuke\n if lower == 'at': continue\n elif lower == 'btw': continue\n elif lower == 'btwn': continue\n elif word.isdigit(): continue\n \n # Or we make substitiutions\n elif lower == 'st' or lower == 'st.':\n word = 'Street'\n elif lower == 'ave' or lower == 'ave.':\n word = 'Avenue'\n elif lower == 'pl' or lower == 'pl.':\n word = 'Place'\n elif lower == 'n': word = 'North'\n elif lower == 'e': word = 'East'\n elif lower == 's': word = 'South'\n elif lower == 'w': word = 'West'\n \n clean.append(word)\n return ' '.join(clean)", "def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )", "def fixAddress(data):\n\tfor each in data:\n\t\ttempK = list()\n\t\tfor tag in each['k']:\n\t\t\tif tag.split(':')[0] == 'addr':\n\t\t\t\ttempK.append(tag.split(':')[1])\n\t\t\telse:\n\t\t\t\ttempK.append(tag)\n\t\t\teach['k'] = deepcopy(tempK)\n\t\tyield each", "def process_address(text):\n return sanitize(text[9:])", "def normalise_address(address):\n return re.sub('\\s+', ' ', str(address).upper()).replace(' ,', ',')", "def normalize_address(self, address, domain):\n if address is not None and not self.address_regex.match(address):\n if domain is not None:\n address = \"{address}@{domain}\".format(address=address, domain=domain)\n else:\n address = None\n\n return address", "def sanitize_message(message, pseudo, to_address, contact):\n replacements = [\n (re.compile(pattern, re.IGNORECASE), replacement)\n for pattern, replacement in (\n # Remove user's real email address\n (re.escape(pseudo.member.user.email()), pseudo.email),\n \n # Remove contact email if message quoted in reply\n (re.escape(to_address.name[1:-1]), ''),\n (re.escape(to_address.email), contact.email),\n \n # Remove 'add to not spam list' link from replies\n ('\\<a class=\\\"%s\\\".+\\</a\\>\\n?' 
% LINK_REMOVE_CLASS, ''),\n )\n ]\n\n for content_type in ('body', 'html'):\n body = getattr(message, content_type).decode()\n\n for pattern, replacement in replacements:\n body = pattern.sub(replacement, body)\n\n logging.debug(body)\n\n setattr(message, content_type, body.encode())\n \n # Remove spam label from subject\n if dontspamme.config.spam_label + ' ' in message.subject:\n message.subject = message.subject.replace(dontspamme.config.spam_label + ' ', '', 1)", "def clean_receiver(self):\n data = self.cleaned_data[\"receiver\"]\n if not is_valid_address(data):\n raise ValidationError(\"Provided value is not a valid Algorand address!\")\n return data", "def eliminate(values):\r\n\r\n ''' Your solution here '''\r\n for key, value in values.items():\r\n if (len(value) == 1):\r\n for key_peer in peers[key]:\r\n values[key_peer] = values[key_peer].replace(value, '')\r\n return values", "def normalize_address(patched_address: OrderedDict[str, str]) -> location.Address:\n\n address_kwargs = {\n # \"street1\",\n # \"city\",\n # \"state\",\n # \"zip\"\n }\n street_buffer: List[str] = []\n suite_buffer: List[str] = []\n while len(patched_address) > 0:\n component, value = patched_address.popitem(last=False)\n if component == \"PlaceName\":\n address_kwargs[\"city\"] = value\n elif component == \"StateName\":\n address_kwargs[\"state\"] = value\n elif component == \"ZipCode\":\n address_kwargs[\"zip\"] = value\n elif component == \"OccupancyType\":\n suite_buffer.append(value)\n elif component == \"OccupancyIdentifier\":\n suite_buffer.append(value)\n else:\n street_buffer.append(value)\n address_kwargs[\"street1\"] = \" \".join(street_buffer)\n if len(suite_buffer) > 0:\n address_kwargs[\"street2\"] = \" \".join(suite_buffer)\n\n return location.Address(**address_kwargs)", "def _check_address(self):\n for object_ in self.objects:\n if object_.object_name.endswith(' ЕС'):\n if object_.object_address[:6].isnumeric():\n object_.object_address = \\\n object_.object_address[:7] + \\\n object_.object_fed_subj + ', ' + \\\n object_.object_address[7:]", "def tidy_address(address):\n address = address.lstrip('$,')\n address = address.rstrip('$,')\n address = re.sub(r'\\$US$', '', address)\n return address", "def eliminate(values):\n for box, val in values.items():\n if len(val) == 1:\n for peer in peers[box]:\n values[peer] = values[peer].replace(val, '')", "def removeAll(self, addr: ghidra.program.model.address.Address) -> None:\n ...", "def extract_addresses(addresses):\n \n # Since lists are iterated over in Python in an orderly fashion, \n # put 'Input Reg' before 'Input' such that the 'Reg' in an address \n # with an 'Input Reg' prefix doesn't get left behind\n address_prefixes= [\"Input Reg \", \"Holding Reg \", \"Input \", \"Coil \"]\n\n \n for idx,address in enumerate(addresses):\n # Replace prefixes with empty string\n for prefix in address_prefixes:\n addresses[idx]=addresses[idx].replace(prefix, \"\")\n # Extract numeral\n try:\n addresses[idx]= int(addresses[idx])\n except:\n logging.warning(\"Invalid modbus address suppied at index {}\".format(idx))\n\n # Return\n return addresses", "def normalize_address(address):\n # Fix 'Place/Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]/[a-zA-Z0-9]', address):\n address = address.replace('/', ' & ')\n # Fix 'Place:Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]:[a-zA-Z0-9]', address):\n address = address.replace(':', ' & ')\n # Fix 'RD' -> 'Rd' & 'PK' -> 'Pk'\n if re.findall(r'[PRSA][KDTV]', address):\n address = 
re.sub(r'([PRSA][KDTV])', \\\n lambda x: x.group(0).title(), address)\n # Fix 'Bl' -> 'Blvd'\n if re.findall(r'(Bl)[\\ ]', address):\n address = address.replace('Bl', 'Blvd')\n # Fix 'w 156th' -> 'W 156th'\n if re.findall(r'[^a-zA-Z][wnse][/ ]', address):\n address = re.sub(r'[^a-zA-Z]([wnse])[/ ]', \\\n lambda x: x.group(0).upper(), address)\n # Fix '151 St' -> '151st St'\n if re.findall(r'[0-9][\\ ][SA][tv]', address):\n address = re.sub(r'[0-9]+', \\\n ordinal_conversion, address)\n return address", "def mail_address(mail_addr_list):\n if mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\" \", \"\")\n if \",\" in mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\",\", \";\")\n mail_addr_list = mail_addr_list.split(\";\")\n for mail_addr in mail_addr_list:\n if len(mail_addr.split(\"@\")) != 2:\n raise ArgumentTypeError(\"Invalid mail address: %s\" % mail_addr)\n return mail_addr_list\n else:\n raise ArgumentTypeError(\"mail address is not specified\")", "def filter_blacklisted_recipients(addresses):\n if type(addresses) is str:\n addr = parseaddr(addresses)[1]\n if not BlacklistedEmail.objects.filter(email=addr).exists():\n return addresses\n return []\n if type(addresses) is list:\n filtered_addresses = []\n for recipient in addresses:\n addr = parseaddr(recipient)[1]\n logger.debug(f\"Address in check for blacklist: {addr}\")\n if BlacklistedEmail.objects.filter(email=addr).exists():\n continue\n filtered_addresses.append(recipient)\n return filtered_addresses", "def clean_sender(sender):\n if sender in _sender_map:\n return _sender_map[sender]\n return ''", "def clear_recipients(self):\n self._to = []\n self._cc = []\n self._bcc = []", "def normalize_address(address: str):\n return Web3.toChecksumAddress(address.lower())", "def _clean_address(self, field):\n data = self.cleaned_data[field]\n if data != \"\" and not is_valid_address(data):\n raise ValidationError(\"Provided value is not a valid Algorand address!\")\n return data", "def clean_venue(location):\n venue = None\n address = None\n locality = location['City'] if 'City' in location else None\n region = location['State.Abbreviated'] if 'State.Abbreviated' in location else None\n postal_code = None\n \n return ' '.join(['' if i is None else i for i in [venue, address, locality, region, postal_code]])", "def listToAddr(location):\n\n start_time = time.time()\n wk = [key for key in location.keys() if key in ('street', 'house_num', 'suburb', 'city', 'province', 'country', 'pos_code')]\n address = re.sub(',', '', ', '.join(value for value in dict(zip(wk, [location[k] for k in wk])).values() if value), 1)\n print('--- Tiempo de ejecucion listToAddr: {} segundos ---'.format((time.time() - start_time)))\n return address", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def eliminate(values):\n # TODO: Copy your code from the classroom to complete this function\n for box,value in values.items():\n #print (box,value)\n if len(values[box]) == 1:\n for peer in peers[box]:\n if value in values[peer]:\n values[peer] = values[peer].replace(value,'')\n return values", "def tidy_telephone(telephone):\n junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']\n telephone = telephone.replace('xxx-xxx-xxxx', '')\n telephone = telephone.replace('ext', ' x')\n telephone = telephone.replace(' cell', '')\n telephone = telephone.replace('\"', '')\n telephone = 
telephone.replace('%', '')\n if telephone in junk:\n return ''\n else:\n return telephone", "def fixAddressObject(xml_):\n rv = xml_\n m = re.search(r'(.*<AddressObj:Address_Value condition=\"Equals\")>(.*comma.*)(<.*)', xml_, re.DOTALL)\n if m:\n rv = m.group(1) + ' apply_condition=\"ANY\">' + m.group(2) + m.group(3)\n return rv", "def _strip_email(email_address):\n return re.sub(\"(?:\\.|\\+.*)(?=.*?@)\", \"\", email_address)", "def _remove_node_address_from_params(cls, params: dict):\n if ConstantKeys.NODE_ADDRESS in params:\n del params[ConstantKeys.NODE_ADDRESS]", "def decode_email_address(address, charset=\"utf8\"):\r\n name = decode_email_header(address[0])\r\n addr = address[1]\r\n addr = \"<\" + addr + \">\"\r\n if not name:\r\n return addr\r\n return name + \" \" + addr", "def erase_address(self):\n self.remove_pointer_or_layer()\n if self.geocoder_source_model is not None:\n self.geocoder_source_model.clear()\n if self.dlg.geocoder_search is not None:\n self.dlg.geocoder_search.clear()\n if self.toolbar_search is not None:\n self.toolbar_search.clear()\n self.toolbar_search.setCompleter(None)", "def test_clean_email(self):\n\n raw_email = 'from=<user@domain.com>'\n result = clean_email(raw_email)\n self.assertEqual(result, 'user@domain.com')", "def coerce_address(address: Address | str) -> Address:\n if isinstance(address, str):\n header = SMTP.header_factory('sender', address)\n assert isinstance(header, SingleAddressHeader)\n return header.address\n\n assert isinstance(address, Address)\n return address", "def normalize(address):\n replacement = re.sub('\\W+', SEPARATOR, address.lower())\n\n processed = []\n for p in replacement.split(SEPARATOR):\n if not p:\n continue\n\n if p in ABBRS:\n processed.append(ABBRS[p])\n else:\n processed.append(p)\n\n processed.sort()\n\n normalized = SEPARATOR.join(processed)\n return normalized", "def clean_street(self):\n street = self.cleaned_data['street'].strip().title()\n street = re.sub(r'\\bRoad\\b', 'Rd', street)\n street = re.sub(r'\\bStreet\\b', 'Str', street)\n street = re.sub(r'\\bAvenue\\b', 'Ave', street)\n street = re.sub(r'\\bParkway\\b', 'Pkwy', street)\n street = re.sub(r'\\bSuite\\b', 'Ste', street)\n street = re.sub(r'\\bApartment\\b', 'Apt', street)\n street = re.sub(r'\\s+', ' ', street) # Remove runs of spaces\n return street", "def recipient(self):\n\t\trecipient = re.search(r\"([Tt]\\s*o )(.*)(from.*)\",self.raw_text()[:250])\n\t\t\n\t\tif recipient: \t\n\t\t\trecipient = recipient.group(2) \t\n\t\t\trecipient = re.sub(r\"(\\w+\\s*\\w+),.*\",r\"\\1\",recipient) #attempting to clear out titles and such\n\t\t\t# recipient = re.sub(r\"([sS]ecre[a-z]+ of the \\w+).*\",\"Secretary of the Navy\",recipient) \t\n\t\t\treturn recipient\n\t\treturn \"Unknown\"", "def async_clear_address(self, address: str) -> None:\n self._matched.pop(address, None)\n self._matched_connectable.pop(address, None)", "def remove_emails(text):\n return re.sub(r'([\\w0-9._-]+@[\\w0-9._-]+\\.[\\w0-9_-]+)', ' ', text)", "def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))", "def decapsulate(self, other):\n s1 = self.to_bytes()\n s2 = Multiaddr(other).to_bytes()\n try:\n idx = s1.rindex(s2)\n except ValueError:\n # if multiaddr not contained, returns a copy\n return Multiaddr(self)\n return Multiaddr(s1[:idx])", "def clean_w_map(value, mapping):\n if value in mapping.keys():\n return mapping[value]\n return value", "def convert(first_name, insertion, last_name, zip_code, 
streetnumber, email):\n return {\n 'first_name': functions.clean(first_name),\n 'insertion': functions.clean(insertion, False, False, True),\n 'last_name': functions.clean(last_name),\n 'zip_code': functions.clean(zip_code, False, uppercase=True),\n 'streetnumber': functions.clean(streetnumber, False, uppercase=True),\n 'email': functions.clean(email, False, lowercase=True)\n }", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def clean_addresses(df, column_name):\n parent_dir = pkg_resources.resource_filename(\"linkage.model.to_replace\", \"\")\n file_names = [\"redundant_in_addresses.json\", # Remove names of German states\n ]\n\n column = df[column_name]\n\n # Addresses to uppercase\n # apply func helps to preserve only numerical values otherwise they are deleted by upper func\n column = column.swifter.apply(lambda x: f' {x} ' if not isna(x) else x)\n\n # Remove all non-alphabetical characters, preserve unicode alphabets\n column = column.str.replace(r'[^\\w|]|[_]', r' ', regex=True)\n\n for fn in file_names:\n column = replace_with_json(column, parent_dir, fn)\n\n # Replace double space\n column = column.str.replace(' +', ' ', n=-1, case=False, regex=True)\n\n # Group single consecutive letters to words\n column.update(group_single_consecutive_letters(column))\n\n # Remove independent numbers\n column = column.str.replace(r'[0-9]+', r' ', regex=True)\n\n # Remove redundant spaces\n # Replace double space\n # Remove space at the beginning and end of the name\n column = column.str.replace(' +', ' ', n=-1, case=False, regex=True).str.strip()\n\n # Update the dataframe\n df.loc[:, column_name] = column", "def parse_building_address(addr_string):\n addr_string = re.sub(_regexp, '', addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_dir, addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_suffix, addr_string)\n addr_string = re.sub(_regexp_extra_space, ' ', addr_string)\n return addr_string.strip().upper()", "def unify_mail(email, primary_domain=None):\n if email is None:\n return ''\n email = email.strip()\n if email == '':\n return ''\n if '@' not in email:\n user = email\n domain = None\n else:\n (user, domain) = email.split('@')\n\n if not domain or domain in MAIL_DOMAINS:\n domain = primary_domain if primary_domain else MAIL_DOMAINS[0]\n return '%s@%s' % (user, domain)", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n # values[peer] = values[peer].replace(digit, '')\n new_value = values[peer].replace(digit, '')\n assign_value(values, peer, new_value)\n return values", "def address_regex(self) -> Any:", "def eliminate(values):\n\tsolved = [box for box in boxes if len(values[box]) == 1]\n\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\tfor empty in empties:\n\t\tvalues[empty] = '123456789'\n\n\tfor box in solved:\n\n\t\tfor peer in peers[box]:\n\t\t\tvalues = assign_value(values, peer, values[peer].replace(values[box], ''))\n\n\treturn values", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def cleaning (data):", "def do_replace_addr(addr):\n do_replace = True\n\n # NOTE: This list should stay in sync with wlan_exp.util mac_addr_desc_map\n\n # Don't replace the broadcast address (FF-FF-FF-FF-FF-FF)\n if(addr == (0xFF, 0xFF, 0xFF, 
0xFF, 0xFF, 0xFF)):\n do_replace = False\n\n # Don't replace multicast IP v4 addresses (01-00-5E-xx-xx-xx)\n # http://technet.microsoft.com/en-us/library/cc957928.aspx\n if(addr[0:3] == (0x01, 0x00, 0x5E) and (addr[4] <= 0x7F)):\n do_replace = False\n\n # Don't replace multicast IP v6 addresses (33-33-xx-xx-xx-xx)\n # http://www.cavebear.com/archive/cavebear/Ethernet/multicast.html\n if(addr[0:2] == (0x33, 0x33)):\n do_replace = False\n\n # Don't replace Mango addresses (40-D8-55-04-2x-xx)\n if(addr[0:4] == (0x40, 0xD8, 0x55, 0x04) and ((addr[4] & 0x20) == 0x20)):\n do_replace = False\n\n return do_replace", "def clean_postal_code(self):\n return self.cleaned_data['postal_code'].strip()", "def keepAddresses(networkItems_):\n for i in networkItems_[:]:\n try:\n ip = netaddr.IPAddress(i)\n except:\n networkItems_.remove(i)\n return networkItems_", "def _format_address(address):\n if 'country' in address and address['country']:\n country = address['country']\n if country == 'CA':\n address['country'] = 'CANADA'\n elif country == 'US':\n address['country'] = 'UNITED STATES OF AMERICA'\n else:\n try:\n country: str = pycountry.countries.search_fuzzy(country)[0].name\n address['country'] = country.upper()\n except (AttributeError, TypeError):\n address['country'] = country\n\n return address", "def lookup(self, message) :\n message = email.message_from_string(message)\n \n # if the message is not to this project, ignore it\n trac_address = self.env.config.get('mail', 'address')\n if not trac_address:\n trac_address = self.env.config.get('notification', 'smtp_replyto')\n if not self.env.config.getbool('mail', 'accept_all') :\n to = list(email.Utils.parseaddr(message['to']))\n cc = message.get('cc','').strip()\n if cc:\n cc = [email.Utils.parseaddr(i.strip())[1] \n for i in cc.split(',') if i.strip()]\n to = to + cc\n delivered_to = message.get('delivered-to', '').strip()\n if delivered_to:\n to.append(email.Utils.parseaddr(delivered_to)[1])\n original_to = message.get('x-original-to', '').strip()\n if original_to:\n to.append(original_to)\n \n if trac_address not in to:\n raise AddressLookupException(\"Email (to : %s ) does not match Trac address: %s\" %(str(to), trac_address))\n \n return message", "def remove(self, transport):\r\n recipients = copy.copy(self.recipients)\r\n for address, recManager in recipients.iteritems():\r\n recManager.remove(transport)\r\n if not len(recManager.transports):\r\n del self.recipients[address]", "def sanitize(self, line):\n self.line_count = self.line_count + 1\n components = line.split(\",\")\n for comp in components:\n # if any field has IP in it, see if the value matches something in the dict\n items = comp.split(\"=\")\n if \"ip\" in items[0]:\n if items[1] in self._insts:\n # found this dirty IP in our learned dictionary, replace it\n dirty_ip = items[1]\n clean_ip = self._insts[dirty_ip]\n line = re.sub(dirty_ip, clean_ip, line, 1)\n\n # if this message has a pdu, clean up the pdu too\n msg_type = self._extract_by_key(line, \"type\")\n if \"sflow\" in msg_type or \"event\" in msg_type:\n pdu = self._extract_by_key(line, \"pdu\")\n # substitute the converted IP based on type\n if \".\" in dirty_ip:\n # v4\n line = re.sub(self._v4_string_to_hex(dirty_ip),\n self._v4_string_to_hex(clean_ip), line)\n\n pdu = self.fix_checksum(pdu)\n\n line = line[0: (line.find(\"pdu=\")) +4] + pdu + \",\\n\"\n else:\n # v6 - remove : and go to lower case before swap\n dirty_swap = re.sub(\":\", \"\", dirty_ip)\n dirty_swap = dirty_swap.lower()\n line = re.sub(dirty_swap, 
self._v4_string_to_hex(clean_ip), line)\n if (args.hexdump):\n pdu_hex = pdu\n pdu_hex = \" \".join(pdu_hex[i:i+2] for i in range(0, len(pdu_hex), 2)) #put timestamp and offset in front of pdu hex\n pdu_hex = pdu[0:15] + \" 000000 \" + pdu_hex + \" ,\\n\"\n hexdump_file.write(pdu_hex)\n return line", "def test_drop_emails():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"test@webmail.com\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"email\"].dropna().empty", "def testGetAddresses3(self):\n self.shop.setMailFromAddress(\"john@doe.com\")\n \n sender = self.addresses.getSender()\n self.assertEqual(sender, \"Site Administrator <john@doe.com>\")\n \n # Just sender is set, hence receiver is same as sender\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Site Administrator <john@doe.com>\",))\n \n # Name and address is set\n self.shop.setMailFromName(\"John Doe\")\n \n sender = self.addresses.getSender()\n self.assertEqual(sender, \"John Doe <john@doe.com>\")\n\n # Just sender is set, hence receiver is same as sender\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"John Doe <john@doe.com>\",))\n\n # Receivers set\n self.shop.setMailTo([\"Jane Doe <jane@doe.com>\"])\n\n sender = self.addresses.getSender()\n self.assertEqual(sender, \"John Doe <john@doe.com>\")\n \n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Jane Doe <jane@doe.com>\",))\n\n # More receivers set\n self.shop.setMailTo([\"Jane Doe <jane@doe.com>\", \"baby@joe.com\"])\n\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Jane Doe <jane@doe.com>\", \"baby@joe.com\"))", "def clean_mentions(self, tweet):\n self.mentions = [tag.strip('@') for tag in tweet.split() if tag.startswith('@')]\n\n for mention in self.mentions:\n tweet = tweet.replace('@'+mention, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n\n return tweet", "def clean_counselor(counselor):\n counselor = dict(counselor)\n\n if not REQUIRED_COUNSELOR_KEYS.issubset(set(counselor.keys())):\n raise ValueError('missing keys in counselor')\n\n lat_lng_keys = ('agc_ADDR_LATITUDE', 'agc_ADDR_LONGITUDE')\n for key in lat_lng_keys:\n counselor[key] = float_or_none(counselor[key])\n\n for key in ('city', 'nme'):\n counselor[key] = title_case(counselor[key])\n\n counselor['email'] = reformat_email(counselor['email'])\n counselor['weburl'] = reformat_weburl(counselor['weburl'])\n\n return counselor", "def remove_emails(text: str, emails=_EMAILS_RE) -> str:\n return emails.sub('', text)", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def clear_address(self): #DONE\n for component_name in self.__keys:\n self.address[component_name] = Component(component_name, '')", "def test_normalize_address_with_route_domain():\n\n # If route domain is not specified, add the default\n tests = [\n [\"1.2.3.4%1\", 2, \"1.2.3.4%1\", \"1.2.3.4\", 1],\n [\"1.2.3.4\", 2, \"1.2.3.4%2\", \"1.2.3.4\", 2],\n [\"64:ff9b::%1\", 2, \"64:ff9b::%1\", \"64:ff9b::\", 1],\n [\"64:ff9b::\", 2, \"64:ff9b::%2\", \"64:ff9b::\", 2]\n ]\n for test in tests:\n 
results = normalize_address_with_route_domain(test[0], test[1])\n assert results[0] == test[2]\n assert results[1] == test[3]\n assert results[2] == test[4]", "def remove_mentions(text):\n return ' '.join(re.sub(r'(?i)\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\b', ' ', text).split())", "def normalize_address(addr: str) -> str:\n # bitcoin hrps\n hrps = {net[\"bech32\"] + \"1\" for net in NETWORKS.values()}\n # liquid hrps\n # Blech32 addresses are intended for confidential assets\n hrps = hrps.union(\n {net[\"blech32\"] + \"1\" for net in NETWORKS.values() if \"blech32\" in net}\n )\n if addr.lower().startswith(tuple(hrps)):\n return addr.lower()\n return addr", "def remove_address(intent, session):\n sess_data = session.setdefault('attributes', {})\n sess_data['remove_address'] = True\n\n # Retrieve stored data just to check if it exists or not.\n user_data = database.get_user_data(session['user']['userId'])\n if not user_data:\n return reply.build(\"I already don't remember any addresses for you.\",\n is_end=True)\n elif sess_data.get('awaiting_confirmation'):\n # The user has requested removal and\n # we requested confirmation\n if intent['name'] == 'AMAZON.NoIntent':\n return reply.build(\"Okay, keeping your stored addresses.\",\n is_end=True)\n elif intent['name'] == 'AMAZON.YesIntent':\n succ = database.delete_user(session['user']['userId'])\n if succ:\n return reply.build(\"Okay, I've forgotten all the addresses \"\n \"you told me.\", is_end=True)\n else:\n # Only get here if the database interaction fails somehow\n return reply.build(\"Huh. Something went wrong.\", is_end=True)\n else:\n # Shouldn't ever get here.\n return reply.build(\"Sorry, I don't know what you mean. \"\n \"Try again?\", persist=sess_data, is_end=False)\n else:\n # Prompt the user for confirmation of data removal.\n sess_data['awaiting_confirmation'] = True\n return reply.build(\"Do you really want me to forget the addresses \"\n \"you gave me?\",\n reprompt='Say \"yes\" to delete all stored addresses '\n 'or \"no\" to not change anything.',\n persist=sess_data,\n is_end=False)", "def eliminate(values):\n complete_boxes = [box for box in values.keys() if len(values[box])==1]\n for box in complete_boxes:\n for peer in peers[box]:\n values = assign_value(values, peer, values[peer].replace(values[box], \"\"))\n \n return values", "def _sanitize_string(msg: str) -> str:\n for email in EMAIL_PATTERN.findall(msg):\n msg = msg.replace(email, re.sub(EMAIL_REPLACEMENT, EMAIL_SUBSTITUTION, email))\n for field_pattern in SENSITIVE_FIELD_PATTERNS:\n msg = field_pattern.sub(FIELD_SUBSTITUTION_PATTERN, msg)\n return msg", "def test_normalize_valid_commercial_address(self) -> None:\n commercial_address = valid_commercial_address()\n normalized = normalize_an_address(commercial_address)\n\n valid_address_assertions(\n test_method=self.TEST_METHOD,\n locale=\"domestic\",\n original_address=commercial_address,\n returned_address=normalized,\n expected_residential_indicator=False,\n )", "def getAddress(user):", "def clean_location(df):\n \n local = df['location'].astype(str)\n \n #geocoders read X St at Y St better than X & Y or X/Y\n local = local.str.replace(\"&\", \"at\")\n local = local.str.replace(\"/\", \"at\")\n \n #OpenAddress dataset has addresses in title case\n local = local.str.title()\n\n return df.assign(location=local.values)", "def _extract_email_address(self, from_email):\n res = email.utils.parseaddr(from_email)\n if len(res[1]) != 0:\n return res[1].lower()\n else:\n print(res, from_email)\n return \"\"", "def 
convert_address(self, addr_obj):\n return addr_obj.mailbox.decode() + '@' + addr_obj.host.decode()", "def _unadapt_domain_param(self, params: dict) -> dict:\n return params", "def test_clean_email_empty(self):\n\n raw_email = 'from=<>'\n result = clean_email(raw_email)\n self.assertEqual(result, '')", "def _clean_addresses_from_external_port(openstack_resource):\n # Get the external port using the resource id provided via port node\n external_port = openstack_resource.get()\n # Check if the current port node has allowed_address_pairs as part of\n # resource_config\n addresses_to_remove = openstack_resource.config.get(\n 'allowed_address_pairs')\n\n if addresses_to_remove:\n remote_addresses = external_port.allowed_address_pairs or []\n # Get the remote ips from the each pair\n remote_ips = \\\n [\n remote_address['ip_address']\n for remote_address\n in remote_addresses if remote_address.get('ip_address')\n ]\n\n # Get the ips need to be removed to the external port\n ips_to_remove = \\\n [\n address_to_remove['ip_address']\n for address_to_remove\n in addresses_to_remove if address_to_remove.get('ip_address')\n ]\n\n # Check if there are a common ips between old ips and the one we\n # should remove via node\n diff_ips = set(remote_ips) - set(ips_to_remove)\n diff_ips = list(diff_ips) if diff_ips else []\n updated_pairs = []\n for ip_address in diff_ips:\n updated_pairs.append({'ip_address': ip_address})\n\n # Update port for allowed paris\n openstack_resource.update({'allowed_address_pairs': updated_pairs})", "def remove_mentions(text):\r\n text = re.sub(r'@\\S+', '', text)\r\n text = re.sub(r'@', \"at\", text)\r\n return text", "def integrated_address_regex(self) -> Any:", "def test_normalize_unknown_address(self) -> None:\n address = unknown_address()\n normalized = normalize_an_address(address)\n\n valid_address_assertions(\n test_method=self.TEST_METHOD,\n locale=\"international\",\n original_address=address,\n returned_address=normalized,\n expected_residential_indicator=None,\n )", "def clean_ipv6_address(\n ip_str, unpack_ipv4=False, error_message=_(\"This is not a valid IPv6 address.\")\n):\n try:\n addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))\n except ValueError:\n raise ValidationError(error_message, code=\"invalid\")\n\n if unpack_ipv4 and addr.ipv4_mapped:\n return str(addr.ipv4_mapped)\n elif addr.ipv4_mapped:\n return \"::ffff:%s\" % str(addr.ipv4_mapped)\n\n return str(addr)", "def set_filter_address(self, addresses):\r\n if isinstance(addresses, basestring):\r\n addresses = [addresses]\r\n self.filter_src_addresses = addresses", "def clean_email(self, email):\n return email", "def _normalize_email_address(email_address: str) -> str:\n normalized = email_address.strip().lower()\n\n if not normalized or (' ' in normalized) or ('@' not in normalized):\n raise ValueError(f\"Invalid email address: '{email_address}'\")\n\n return normalized", "def parse_address_campus(address_str, address_campus_re, address_campus_room_re):\n address = {}\n errors = []\n if '$' not in address_str:\n match = re.search(address_campus_room_re, address_str)\n if match:\n address['addressLine1'] = match.group(1)\n else:\n # This leftover is either an erroneous email address or a building name\n if '@' in address_str:\n errors.append('Campus address seems to be email: {}'.format(address_str))\n #FIXME: Should this be saved to addressLine1 anyway.\n else:\n # It seems to be a building address\n address['addressLine2'] = address_str\n else:\n match = re.search(address_campus_re, 
address_str)\n if match:\n address['addressLine2'] = match.group(1)\n address['addressLine1'] = match.group(2)\n #else:\n # FIXME: here just for debug\n #errors.append('Cannot parse campus address: {}'.format(address_str))\n return (address, errors)", "def formatAddress():\n # Strings to load data\n stringFile = '/Users/Louis/Documents/Research/Code/cleanedData/'\n days = {'cleaned01-Dec-2015':2,# tuesday\n 'cleaned02-Dec-2015':3,# wednesday\n 'cleaned03-Dec-2015':4,# ...\n 'cleaned04-Dec-2015':5,\n 'cleaned07-Dec-2015':1,\n 'cleaned08-Dec-2015':2,\n 'cleaned09-Dec-2015':3,\n 'cleaned10-Dec-2015':4,\n 'cleaned11-Dec-2015':5,\n 'cleaned14-Dec-2015':1,\n 'cleaned15-Dec-2015':2,\n 'cleaned16-Dec-2015':3,\n 'cleaned17-Dec-2015':4,\n 'cleaned18-Dec-2015':5,\n 'cleaned21-Dec-2015':1}\n \n # Store results\n addresses = []\n CourierSuppliedAddresses = []\n \n for day in days.keys():\n # Configuration for CSV reading\n with open(stringFile+day+'_modified.csv') as csvfile:\n # Dictionary containing the info\n reader = csv.DictReader(csvfile,delimiter = ',')\n # print(day)\n \n for row in reader:\n addresses.append(row['Address'])\n CourierSuppliedAddresses.append(row['CourierSuppliedAddress'])\n \n addresses = list(set(addresses))\n addresses.sort()\n \n CourierSuppliedAddresses = list(set(CourierSuppliedAddresses))\n CourierSuppliedAddresses.sort()\n return addresses, CourierSuppliedAddresses", "def unpack_addresses(self, addresses_to_test):\n if len(addresses_to_test) == 0:\n raise ValueError(\n \"There were no arguments passed to the function. That is wrong. Closing\"\n )\n\n return_addresses = []\n for address in addresses_to_test:\n if \"/\" in address:\n try:\n six_or_four = ipaddress.ip_network(address)\n except ValueError:\n print(f\"{address} is not a valid subnet. Skipping.\")\n continue\n for address_host in six_or_four.hosts():\n return_addresses.append(str(address_host))\n else:\n try:\n ipaddress.ip_address(address)\n except ValueError:\n print(f\"{address} is not a valid address. Skipping.\")\n continue\n return_addresses.append(str(address))\n for address in return_addresses:\n try:\n ipaddress.ip_address(address)\n except ValueError:\n raise ValueError(f\"{address} is not an IPv4/v6 address. Shutting Down\")\n if len(return_addresses) > 0:\n return return_addresses\n else:\n raise ValueError(\"No usable addresses to scan\")", "def clean(c):", "def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'", "def get_recipients(msg_parsed):\n recipients = []\n addr_fields = ['From', 'To', 'Cc', 'Bcc']\n\n for f in addr_fields:\n rfield = msg_parsed.get(f, \"\") # Empty string if field not present\n rlist = re.findall(ADDR_PATTERN, rfield)\n recipients.extend(rlist)\n\n return recipients", "def clean_phone(number_str):\n number_str = number_str or ''\n number_str = number_str.replace('(', '').replace(')', '')\n number_str = number_str.replace('ext. 
', 'x').replace('ext ', 'x')\n number_str = number_str.split(',')[0].strip()\n\n if number_str:\n return number_str", "def __process_address(self, address: Tuple[int, int, int, int, int]) -> Dict[str, int]:\n return {\n 'interface': address[0],\n 'protocol': address[1],\n 'type': address[2],\n 'hardware_type': address[3],\n 'address': address[4],\n }", "def __init__(self, fromaddr, toaddrs, subject='', body='',\n smtphost='localhost'):\n\n if isinstance(toaddrs, StringType):\n self.toaddrs = [x.strip() for x in toaddrs.split(',') if x]\n else:\n self.toaddrs = toaddrs\n \n self.fromaddr = fromaddr\n self.subject = subject\n self.body = body\n self.smtphost = smtphost", "def split_email_addresses(line):\n if line:\n addrs = line.split(',')\n addrs = frozenset(map(lambda x: x.strip(), addrs))\n else:\n addrs = None\n return addrs", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def resolve_addressing(parsed_template: parse_templates.ParsedTemplateRefined,\n grpd: parse_pronoun_data.GRPD) -> (parse_templates.ParsedTemplateRefined,\n parse_pronoun_data.GRPD):\n\n new_template = copy.deepcopy(parsed_template)\n new_grpd = copy.deepcopy(grpd)\n for i in range(1, len(new_template), 2):\n id_value = new_template[i][\"id\"]\n if new_template[i][\"context\"] == \"address\":\n if ContextValues.get_value(grpd, id_value, \"gender-addressing\") in (\"f\", \"false\"):\n new_template[i][\"context\"] = \"personal-name\"\n\n return new_template, new_grpd" ]
[ "0.6710551", "0.6352095", "0.6117725", "0.6082749", "0.59901404", "0.5843147", "0.5725053", "0.56326616", "0.5629568", "0.56150633", "0.56043804", "0.5557785", "0.55360675", "0.5443463", "0.54358226", "0.5432276", "0.5409129", "0.5401632", "0.53814", "0.53644365", "0.5340619", "0.53253734", "0.52830714", "0.52778083", "0.52776635", "0.52694345", "0.52655226", "0.5253748", "0.5238448", "0.5235887", "0.5230427", "0.52264374", "0.52139693", "0.5208523", "0.5198346", "0.5193276", "0.5161895", "0.5159394", "0.5156508", "0.51347244", "0.5132968", "0.5120125", "0.5118976", "0.5111431", "0.51054937", "0.5099329", "0.50945914", "0.50865185", "0.5082014", "0.5073398", "0.50669956", "0.50661397", "0.5061742", "0.50604844", "0.5053332", "0.50503504", "0.50484914", "0.5040663", "0.503595", "0.50351614", "0.50333714", "0.502994", "0.50275266", "0.5012418", "0.5001451", "0.5001451", "0.49949515", "0.49872804", "0.49869666", "0.4978265", "0.4956355", "0.49440867", "0.49348357", "0.49310538", "0.49173895", "0.49134377", "0.4896161", "0.48949933", "0.48931423", "0.4892265", "0.4881729", "0.48506078", "0.48432645", "0.4843059", "0.483726", "0.48296916", "0.4828903", "0.4821697", "0.48174322", "0.4803576", "0.47970206", "0.47864857", "0.4781867", "0.4781745", "0.4777557", "0.477517", "0.47741458", "0.47738394", "0.47714868", "0.4771018" ]
0.7065244
0
Examine the text to see if we need to use full MIME parsing or not.
def use_full_parser(text):
    end_of_header_match = _end_of_simple_header_pattern.search(text)
    return end_of_header_match is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasRawText(self, text):\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|dl|pre|h\\d)[^>]*?>.*</\\1>',\n re.S).sub('', text.strip()).strip()\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\n return '' != r", "def is_text(content):\n if b\"\\0\" in content:\n return False\n if not content: # Empty files are considered text\n return True\n # Try to decode as UTF-8\n try:\n content.decode(\"utf8\")\n except UnicodeDecodeError:\n return False\n else:\n return True", "def test_text_email_only(self):\n data = mailgun_payload\n del data['stripped-html']\n request = self.factory.post(self.url, data=data)\n email = self.parser.parse(request)\n self._assertEmailParsedCorrectly(email, mailgun_payload)", "def process(self) -> None:\n self.parsed = email.message_from_bytes(self.rawmailcontent, policy=email.policy.EmailPolicy()) # type: email.message.EmailMessage\n\n self.subject = self.parsed[\"subject\"]\n\n if self.parsed[\"X-Jicket-Initial-ReplyID\"] is not None and self.parsed[\"X-Jicket-Initial-ReplyID\"] == self.parsed[\"In-Reply-To\"]:\n self.threadstarter = True\n elif self.config.ticketAddress in self.parsed[\"From\"]: # Take more heuristic approach\n self.threadstarter = True\n\n self.rawmailcontent = None # No need to store after processing\n\n self.get_text_bodies(self.parsed)\n self.textfrombodies()", "def match_mime_type(self, src: str):\n for key in self.keys():\n if Pattern.test(key, src):\n return self[key]\n return \"text/plain\"", "def validate_txtfile(path):\n bFile = True if mimetypes.guess_type(path)[0] == 'text/plain' else False\n return bFile", "def mime_string(file_path: Path, text_signature_detection: bool = False) -> str:\n guess, encoding = mimetypes.guess_type(file_path.resolve()) # symbolic links should be resolved for mimetypes.guess_type(...) 
to work correctly\n guess = guess if guess else 'application/octet-stream'\n if guess.startswith('text/') and not encoding and text_signature_detection:\n with suppress(Exception):\n with closing(UniversalDetector()) as enc_detector, open(file_path, 'rb') as file:\n for line in file:\n enc_detector.feed(line)\n if enc_detector.done:\n encoding = enc_detector.result['encoding']\n break\n if encoding:\n return f'{guess};charset={encoding}'\n return guess", "def text_media_type(name):\n return name.endswith(('.txt', '.py', '.lg', '.doc', '.rtf'))", "def _check_mimetype(self):\n if self.mimetype in Config.aliases:\n mimetype = Config.aliases[self.mimetype]\n else:\n mimetype = self.mimetype\n expected_extensions = mimetypes.guess_all_extensions(mimetype,\n strict=False)\n if expected_extensions:\n if self.has_extension and self.extension not in expected_extensions:\n # LOG: improve this string\n self.make_dangerous('expected extensions')", "def textparts(msg):\n return set(filter(lambda part: part.get_content_maintype() == 'text',\n msg.walk()))", "def is_text( self ):\n return self.get_main_type() == 'text'", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def process_part(self, part):\n\t\tcontent_type = part.get_content_type()\n\t\tfilename = part.get_filename()\n\t\tif content_type == 'text/plain' and not filename:\n\t\t\tself.text_content += self.get_payload(part)\n\n\t\telif content_type == 'text/html':\n\t\t\tself.html_content += self.get_payload(part)\n\n\t\telif content_type == 'message/rfc822':\n\t\t\t# sent by outlook when another email is sent as an attachment to this email\n\t\t\tself.show_attached_email_headers_in_content(part)\n\n\t\telif content_type == 'text/calendar':\n\t\t\tself.set_calendar_invite(part)\n\n\t\telif filename or 'image' in content_type:\n\t\t\tself.get_attachment(part)", "def is_plain_text(self):\n return self._tag == 'plain_text'", "def filter(self, *args):\n exps = []\n\n #this ones can be improved. 
some are not getting extracted\n if \"retweets\" in args:\n exps.append(re.compile(\"^RT ?(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9-_]+):\"))\n if \"emoticons\" in args:\n exps.append(\"emoticons\")\n if \"flags\" in args:\n exps.append(re.compile(u\"[\\U0001F1E6-\\U0001F1FF]\"))\n if \"handles\" in args:\n # Handles at start of string\n exps.append(re.compile(\"^\\s*((?<=^|(?<=[^\\S]))@([\\S]+)\\s*)*\"))\n # Handles at end of string\n exps.append(re.compile(\"\\s+((?<=^|(?<=[^\\S]))@(\\S+)\\s*)*$\"))\n if \"urls\" in args:\n exps.append(re.compile(\"(https?|ftp)://[^\\s/$.?#].[^\\s]*\"))\n if \"hashtags\" in args:\n # Hastags at start of string\n exps.append(re.compile(\"^\\s*((?<=^|(?<=[^\\S]))#([\\S]+)\\s*)*\"))\n # Hashtags at end of string\n exps.append(re.compile(\"\\s+((?<=^|(?<=[^\\S]))#(\\S+)\\s*)*$\"))\n\n\n # Use all filters\n if \"*\" in args and not exps:\n return self.filter(\"retweets\", \"emoticons\", \"flags\", \"handles\", \"urls\", \"hashtags\")\n\n filtering_text = self.raw_text\n\n for expression in exps:\n if expression == \"emoticons\":\n filtering_text = ''.join(c for c in filtering_text if c not in emoji.UNICODE_EMOJI)\n else:\n filtering_text = re.sub(expression, \"\", filtering_text)\n\n # Remove extra spaces\n self.clean_text = re.sub(r\"\\s\\s+\", ' ', filtering_text.strip())\n return self.clean_text", "def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False", "def extract_mime(self, mime, def_mime='unk'):\n self.mime = def_mime\n if mime:\n self.mime = self.MIME_RE.split(mime, 1)[0]", "def validate(sender_id, message_text, attachment_type, postback, quick_reply):\n\n if message_text:\n return True, dict()\n else:\n return False, dict(message_text='Want to add some tags?')", "def isHTML(content):\n\n return '<html' in content or 'html>' in content", "def is_content(cls, path_or_content):\n return any(path_or_content.lstrip().startswith(s) for s in cls.valid_content_start)", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def verify_text(self, text):\n pass", "def hasContents():", "def retweet_filter(self, text):\n return not text.lower().startswith('rt')", "def test_fetchParserTextSection(self):\n p = imap4._FetchParser()\n p.parseString(b\"BODY[TEXT]\")\n self.assertEqual(len(p.result), 1)\n self.assertIsInstance(p.result[0], p.Body)\n self.assertEqual(p.result[0].peek, False)\n self.assertIsInstance(p.result[0].text, p.Text)\n self.assertEqual(bytes(p.result[0]), b'BODY[TEXT]')", "def is_retweet(self,text):\n if text[0] == 'rt':\n return True\n else:\n return False", "def maybe_text(instream):\n if instream.mode == 'w':\n # output binary streams *could* hold text\n # (this is not about the file 
type, but about the content)\n return True\n try:\n sample = instream.peek(_TEXT_SAMPLE_SIZE)\n except EnvironmentError:\n return None\n if set(sample) & set(_NON_TEXT_BYTES):\n logging.debug(\n 'Found unexpected bytes: identifying unknown input stream as binary.'\n )\n return False\n try:\n sample.decode('utf-8')\n except UnicodeDecodeError as err:\n # need to ensure we ignore errors due to clipping inside a utf-8 sequence\n if err.reason != 'unexpected end of data':\n logging.debug(\n 'Found non-UTF8: identifying unknown input stream as binary.'\n )\n return False\n logging.debug('Tentatively identifying unknown input stream as text.')\n return True", "def textfrombodies(self) -> str:\n type_priority = [\"plain\", \"html\", \"other\"] # TODO: Make configurable\n\n for texttype in type_priority:\n if texttype == \"plain\" and texttype in self.textbodies:\n \"\"\"Text is plain, so it can be used verbatim\"\"\"\n return self.textbodies[texttype]\n if texttype == \"html\" and texttype in self.textbodies:\n \"\"\"HTML text. Convert to markup with html2text and remove extra spaces\"\"\"\n text = html2text.html2text(self.textbodies[texttype])\n # Remove every second newline which is added to distinguish between paragraphs in Markdown, but makes\n # the jira ticket hard to read.\n return re.sub(\"(\\n.*?)\\n\", \"\\g<1>\", text)\n if texttype == \"other\" and len(self.textbodies):\n # If no other text is found, return the first available body if any.\n return self.textbodies[list(self.textbodies.keys())[0]]\n return \"The email contained no text bodies.\"", "def __init__(self, content):\n\t\tself.raw = content\n\t\tself.mail = email.message_from_string(self.raw)\n\n\t\tself.text_content = ''\n\t\tself.html_content = ''\n\t\tself.attachments = []\n\t\tself.cid_map = {}\n\t\tself.parse()\n\t\tself.set_content_and_type()\n\t\tself.set_subject()\n\t\tself.set_from()\n\t\tself.message_id = self.mail.get('Message-ID')\n\n\n\t\tself.unique_id = get_unique_id(self.mail)\n\n\t\t# gmail mailing-list compatibility\n\t\t# use X-Original-Sender if available, as gmail sometimes modifies the 'From'\n\t\t# _from_email = self.mail.get(\"X-Original-From\") or self.mail[\"From\"]\n\t\t# \n\t\t# self.from_email = extract_email_id(_from_email)\n\t\t# if self.from_email:\n\t\t# \tself.from_email = self.from_email.lower()\n\t\t# \n\t\t# #self.from_real_name = email.utils.parseaddr(_from_email)[0]\n\t\t# \n\t\t# _from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])\n\t\t# self.from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])[0][0] or \"\"\n\t\t# \n\t\t# try:\n\t\t# \tif _from_real_name[0][1]:\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(_from_real_name[0][1])\n\t\t# \telse:\n\t\t# \t\t# assume that the encoding is utf-8\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(\"utf-8\")\n\t\t# except UnicodeDecodeError,e:\n\t\t# \tprint e\n\t\t# \tpass\n\n\t\t#self.from_real_name = email.Header.decode_header(email.utils.parseaddr(_from_email)[0])[0][0]\n\t\tself.To = self.mail.get(\"To\")\n\t\tif self.To:\n\t\t\tto = u\"\"\n\t\t\tfor name, encoding in decode_header(self.To):\n\t\t\t\tif encoding:\n\t\t\t\t\tto += name.decode(encoding)\n\t\t\t\telse:\n\t\t\t\t\tto += name\n\t\t\tself.To = to.lower()\n\t\tself.CC = self.mail.get(\"CC\")\n\t\tif self.CC:\n\t\t\tself.CC = self.CC.lower()\n\t\tif self.mail[\"Date\"]:\n\t\t\ttry:\n\t\t\t\tutc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail[\"Date\"]))\n\t\t\t\tutc_dt = 
datetime.datetime.utcfromtimestamp(utc)\n\t\t\t\tself.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\texcept:\n\t\t\t\tself.date = now()\n\t\telse:\n\t\t\tself.date = now()\n\t\tif self.date > now():\n\t\t\tself.date = now()", "def clean_text(self, text) -> Union[str, None]:\n if text and ''.join(text.split()):\n if type(text) == bytes: #Decoding byte strings\n text = text.decode('utf-8')\n #Removing emails + ***.com urls\n text = ' '.join([item for item in text.split() if '@' not in item and '.com' not in item])\n text = ' '.join(text.split()) #removing all multiple spaces\n if text: return text\n # UNCLEAN_TEXT.inc()\n return None", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)", "def detect(stream):\n try:\n parse(stream)\n return True\n except (xml.parsers.expat.ExpatError, TypeError):\n return False", "def IsHtml(data):\n # Remove banners and XML header. 
Convert to lower case for easy search.\n data = ''.join(data.split('\\n')).lower()\n pattern = re.compile('<html>.*?<body.*?>.*?</body>.*?</html>')\n if pattern.findall(data):\n return True\n else:\n return False", "def _ConsumeTextForPlugin(self):\n return (self._plugin_stack and\n self._plugin_stack[-1][\"id\"] in self._RAW_PLUGINS)", "def _text(self, text):\r\n URL_REGEX.sub(self._parse_urls, text)\r\n USERNAME_REGEX.sub(self._parse_users, text)\r\n LIST_REGEX.sub(self._parse_lists, text)\r\n HASHTAG_REGEX.sub(self._parse_tags, text)\r\n return None", "def myProcessor(text):\n if re.match(\"RT .+\", text) is not None:\n match = re.match(\"RT (.+)\", text)\n group1 = match.group(1)\n return group1 \n else:\n return text", "def is_encoded(self,text):\n \n try:\n str(text)\n except:\n return False\n else:\n return True", "def canProcess(self, event, meta):\n if len(self.subscribe) and event not in self.subscribe:\n return False\n\n if len(self.mime) and meta.has_key('getcontenttype'):\n nodeMime = meta['getcontenttype']\n for mime in self.mime:\n if nodeMime.startswith(mime):\n return True\n return False\n\n return True", "def parse_content(content):\n attachments = []\n body = None\n html = None\n\n for part in content.walk():\n if part.get('Content-Disposition') is not None:\n decoded_data = decode_attachment(part)\n\n attachment = parse_attachment(part)\n if attachment:\n attachments.append(attachment)\n elif part.get_content_type() == \"text/plain\":\n if body is None:\n body = \"\"\n body += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n elif part.get_content_type() == \"text/html\":\n if html is None:\n html = \"\"\n html += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n # return the parsed data\n return {\n 'body': body,\n 'html': html,\n 'filename': decoded_data['filename']\n # 'attachments': attachments\n }", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def test_textPart(self):\n body = b'hello, world\\nhow are you?\\ngoodbye\\n'\n major = 'text'\n minor = 'jpeg'\n charset = 'us-ascii'\n identifier = 'some kind of id'\n description = 'great justice'\n encoding = 'maximum'\n msg = FakeyMessage({\n 'content-type': major + '/' + minor +\n '; charset=' + charset + '; x=y',\n 'content-id': identifier,\n 'content-description': description,\n 'content-transfer-encoding': encoding,\n }, (), b'', body, 123, None)\n structure = imap4.getBodyStructure(msg)\n self.assertEqual(\n [major, minor, [\"charset\", charset, 'x', 'y'], identifier,\n description, encoding, len(body), 
len(body.splitlines())],\n structure)", "def processFaxbotMessage(self, txt):\r\n with self.__lock:\r\n if \"I do not understand your request\" in txt:\r\n replyTxt = (\"FaxBot does not have the requested monster '{}'. \"\r\n \"(Check the list at {} )\"\r\n .format(self._lastRequest, self.fax_list_url)) \r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return replyTxt\r\n if \"just delivered a fax\" in txt:\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return (\"FaxBot received the request too early. \"\r\n \"Please try again.\")\r\n if \"try again tomorrow\" in txt:\r\n self._noMoreFaxesTime = utcTime()\r\n txt = (\"I'm not allowed to request any more faxes today. \"\r\n \"Request manually with /w FaxBot {}\"\r\n .format(self._lastRequest))\r\n self._lastRequest = None\r\n self._lastRequestTime = utcTime()\r\n return txt\r\n m = re.search(r'has copied', txt)\r\n if m is not None:\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n self._lastFaxBotTime = utcTime()\r\n # suppress output from checkForNewFax since we are returning\r\n # the text, to be output later\r\n return self.checkForNewFax(False)\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return \"Received message from FaxBot: {}\".format(txt)", "def process_body(text):\n # if text != None:\n if text is not None:\n soup = BeautifulSoup(str(text), 'html.parser')\n try:\n soup.find('blockquote').decompose()\n contained_quote = True\n\n except AttributeError:\n contained_quote = False\n\n cleaned = soup.get_text()\n cleaned = unicodedata.normalize(\"NFKD\", cleaned)\n\n return cleaned, contained_quote\n else:\n cleaned = float(\"nan\")\n contained_quote = float(\"nan\")\n return cleaned, contained_quote", "def test_mime_lookup(self):\n mime_out_test_path = os.path.join(THIS_DIR, 'file-blobs.out')\n mime_lookup = MimeLookup(mime_out_test_path)\n self.assertEqual(mime_lookup.get_entry_count(), 5)\n self.assertEqual(mime_lookup.get_mime_string('4b11cb448cab68470c546bc52220b01fbc4572f7'),\n 'image/png; charset=binary')\n self.assertEqual(mime_lookup.get_mime_string('f8fa2aa81a623f9847436c5162d4e775e04cd948'),\n 'text/plain; charset=us-ascii')\n self.assertEqual(mime_lookup.get_mime_string('9f422292259b59ee6c9ad7a25180b0afc16f47e9'),\n LONG_MIME)\n self.assertEqual(mime_lookup.get_mime_string('d1717e616fdae20110acb51b3ba3a37350628131'),\n 'application/pdf; charset=binary')\n self.assertEqual(mime_lookup.get_mime_string('a7510ac5483396687bf670860f48d21eecede68a'),\n 'application/zip; charset=binary')", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def testIsText(self):\n parser = text_parser.PyparsingSingleLineTextParser()\n\n bytes_in = b'this is My Weird ASCII and non whatever string.'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = 'Plaso Síar Og Raðar Þessu'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'\\x01\\\\62LSO\\xFF'\n self.assertFalse(parser._IsText(bytes_in))\n\n bytes_in = b'T\\x00h\\x00i\\x00s\\x00\\x20\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii Open then...\\x00\\x99\\x23'\n self.assertFalse(parser._IsText(bytes_in))", "def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass", "def checkParse(self, epytext, xml=None):\n errors = []\n out = parse(epytext, errors)\n if out is None: out = ''\n else: out = out.childNodes[0].toxml().strip()\n if out[:9] == '<epytext>' and 
out[-10:] == '</epytext>':\n out = out[9:-10]\n \n self.failIfParseError(epytext, errors)\n if xml:\n self.failUnlessEqual(`out`, `xml.strip()`)", "def get_text_from_email(msg):\n parts = []\n for part in msg.walk():\n if part.get_content_type() == 'text/plain':\n parts.append(part.get_payload())\n return ''.join(parts)", "def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)", "def images_media_filter(hash_str, mime_type):\n return mime_type in MIME_TO_EXTESION_MAPPING", "def _check_extension(self):\n if self.extension in Config.override_ext:\n expected_mimetype = Config.override_ext[self.extension]\n else:\n expected_mimetype, encoding = mimetypes.guess_type(self.src_path,\n strict=False)\n if expected_mimetype in Config.aliases:\n expected_mimetype = Config.aliases[expected_mimetype]\n is_known_extension = self.extension in mimetypes.types_map.keys()\n if is_known_extension and expected_mimetype != self.mimetype:\n # LOG: improve this string\n self.make_dangerous('expected_mimetype')", "def is_multipartite(self):\n return True", "def downloaded_transcript_contains_text(self, transcript_format, text_to_search, video_display_name=None):\r\n transcript_selector = self.get_element_selector(video_display_name, VIDEO_MENUS['transcript-format'])\r\n\r\n # check if we have a 
transcript with correct format\r\n if '.' + transcript_format not in self.q(css=transcript_selector).text[0]:\r\n return False\r\n\r\n formats = {\r\n 'srt': 'application/x-subrip',\r\n 'txt': 'text/plain',\r\n }\r\n\r\n transcript_url_selector = self.get_element_selector(video_display_name, VIDEO_BUTTONS['download_transcript'])\r\n url = self.q(css=transcript_url_selector).attrs('href')[0]\r\n result, headers, content = self._get_transcript(url)\r\n\r\n if result is False:\r\n return False\r\n\r\n if formats[transcript_format] not in headers.get('content-type', ''):\r\n return False\r\n\r\n if text_to_search not in content.decode('utf-8'):\r\n return False\r\n\r\n return True", "def parse(self):\n\n text = self.text.li\n\n # helper function to parse both BeautifulSoup tags and NavigableStrings\n def extract_text(x):\n if type(x).__name__ == \"NavigableString\":\n return x\n elif x.name == 'br':\n return '\\n'\n else:\n return x.get_text()\n\n # helper function to get text from a bullet, ignoring potential\n # sub-bullets or images\n def get_bullet_parts(q):\n parts = []\n for c in q.children:\n if c.name == 'ul':\n break\n elif c.name == 'div' and 'thumb' in c['class']:\n pass\n elif c.name == 'a' and 'class' in c.attrs and 'autonumber' in c['class']:\n pass\n else:\n parts.append(c)\n return parts\n\n def is_english(quote, quote_parts=None):\n # reject quotes not in latin alphabet\n alpha = 'abcdefghijklmnopqrstuvwzyz'\n spaceless = quote.replace(' ', '')\n if not len(spaceless):\n print(quote)\n return False\n prop_latin = sum(map(lambda x: x in alpha, spaceless.lower())) / len(spaceless)\n if prop_latin < .6:\n print(quote)\n return False\n\n # figure out whether quote is in italics\n textlen = len(quote)\n try:\n italiclen = len(''.join([extract_text(x) for x in quote_parts if x.name=='i']))\n except:\n italiclen = 0\n if italiclen + 5 > textlen:\n is_italic = True\n else:\n is_italic = False\n\n is_en_list = [en_dict.check(s.strip('\\'\"(){}[].?!-—’,<>')) for s in quote.split() if len(s.strip('\\'\"(){}[].?!-—’,<>'))]\n en_proportion = (sum(is_en_list)+2)/len(is_en_list)\n if en_proportion > .6 and not is_italic:\n return True\n elif en_proportion > .8 and is_italic:\n return True\n else:\n print(quote)\n return False\n\n\n # get sub-bullets which might include source name\n meta_info = text.ul\n quote_parts = get_bullet_parts(text)\n try:\n quote = ''.join(map(extract_text, quote_parts)).strip()\n # quote in foreign language, try next subbullet\n if not is_english(quote, quote_parts):\n if meta_info:\n old_quote = quote\n bullets = meta_info.find_all('li')\n quote_parts = get_bullet_parts(bullets[0])\n quote = ''.join(map(extract_text, quote_parts)).strip()\n # check if subbullet seems to be in english\n if is_english(quote, quote_parts) and len(quote) > len(old_quote)*.6:\n badwords = ['pp.', 'p.', 'ch.', 'chapter', 'page', 'chap.', 'act', 'book']\n if sum([quote.lower().startswith(b) for b in badwords]) > 0:\n self.invalid = True\n else:\n self.quote = quote\n if len(bullets) > 1:\n source_parts = get_bullet_parts(bullets[1])\n self.potential_source = ''.join(map(extract_text, source_parts)).strip()\n else:\n self.invalid = True\n else:\n self.invalid = True\n print(\"foreign with no meta-info:\", quote)\n else:\n self.quote = quote\n if meta_info:\n source_parts = get_bullet_parts(meta_info.li)\n self.potential_source = ''.join(map(extract_text, source_parts)).strip()\n # try to catch things like chapter headings that get through from bad parses\n badwords = ['p.', 'pp.', 'ch.', 
'chapter', 'page', 'chap.']\n if len(quote) < 25 and sum([(b in quote.lower().split()) for b in badwords]) > 0:\n self.invalid = True\n if ('\\\\displaystyle' in quote):\n self.invalid = True\n badwords = ['pp.', 'p.', 'ch.', 'chapter', 'page', 'chap.', 'act', 'book']\n if self.potential_source and sum([self.potential_source.lower().startswith(b) for b in badwords]) > 0:\n self.potential_source = None\n except Exception as e:\n print(e)\n print(quote_parts, meta_info)\n self.invalid = True", "async def match(cls, entry: \"TaskEntry\", text: Text):\n if (result := cls.re_match.fullmatch(text.plain)) :\n return result", "def is_resent(self):\n return self.unixtext.find(\"...RESENT\") > 0", "def _file_can_be_compressed(filename):\n content_type = ''\n with open(filename, 'rb') as f:\n content_type = _get_content_type(f)\n return content_type in TEXT_TYPES", "def looks_like_PEM(text):\n\n i = text.find(\"-----BEGIN \")\n return i >= 0 and text.find(\"\\n-----END \", i) > i", "def from_body(self, body):\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if not binary_is_text(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')", "def user_reply_filter(self, text):\n return not text.lower().startswith('@')", "def convert(self,message):\n \n content_type = message.get('content',{}).get('@type','')\n if content_type in self.supported:\n result = getattr(self.tconv, content_type)(message)\n else:\n return False\n \n return result", "def dissect(self, text):", "def _check_with_content(params):\r\n if 'with_content' in params and params['with_content'] != 'false':\r\n return True\r\n else:\r\n return False", "def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True", "def test_parse_email(self):\n self.email.open_path = './original_email.txt'\n self.email.open_email()\n self.email.parse_email()\n sample_email = open('./parsed_email.txt', 'r')\n parsed_email = sample_email.read()\n sample_email.close()\n self.assertMultiLineEqual(parsed_email, self.email.parsed_email)", "def is_file_containing_malicious_content(self, content: str, file_name: str):\n\n for suspicious_string in self.stringsCausingSuspicions:\n # if it's a regexp\n if str(suspicious_string.__class__) == \"<class '_sre.SRE_Pattern'>\":\n if len(suspicious_string.findall(content)) > 0:\n return True\n\n continue\n\n # regular, simple string check\n if suspicious_string in content:\n return True\n\n return False", "def guess(path: pathlib.Path) -> str:\n return mime_map.get(Magic(mime=True).from_file(str(path)), \"text\")", "def getMime(filename):\n line = mimeDB.file(filename)\n if line is not None:\n parts = line.split(';')\n mime = parts[0].strip()\n if mime.find('/')==-1:\n mime = 'application/octet-stream'\n elif mime == 'text/html' and \\\n (filename[-5:].lower() == '.xslt' or filename[-4:].lower() == '.xsl'):\n # @notice: workaround for the broken mime detection on debian\n # @todo: fix the real problem and remove this code\n mime = 'application/xml'\n encoding = None\n if len(parts)==2:\n encoding = parts[1][9:]\n return mime, encoding\n return None, None", "def post_process(text):\n # XXX update to spit out HTML - no need for requests GDocs can take html\n verbose = False\n request_list = []\n chars = iter(text)\n normal_text = []\n knownsigils = 
{\"end\":('',\"NONE\"),\n \"^\": (\"0123456789+-\",\"SUPERSCRIPT\"),\n \"_\": (\"0123456789\",\"SUBSCRIPT\")\n }\n c = next(chars, \"end\")\n while (True):\n if (c in knownsigils.keys()):\n if len(normal_text): request_list.append((''.join(normal_text), \"NORMAL\"))\n normal_text.clear()\n (c,token) = _gettoken(c,chars,knownsigils)\n if (token is not None): request_list.append(token)\n if (c==\"end\"):\n break\n else:\n continue\n else:\n normal_text.append(c)\n c = next(chars, \"end\")\n return request_list", "def remove_extra_text(self, text):\n if text:\n parsed_text = text\n if parsed_text.find('== Referencias ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Referencias ==\\n')]\n if parsed_text.find('== Fuentes ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Fuentes ==\\n')]\n if parsed_text.find('== Fuente ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Fuente ==\\n')]\n if parsed_text.find('== Ver también =='.decode('utf-8')) > 0:\n parsed_text = parsed_text[:parsed_text.find('== Ver también ==\\n'.decode('utf-8'))]\n if parsed_text.find(\"== Noticia relacionada ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Noticia relacionada ==\".decode('utf-8'))]\n if parsed_text.find(\"== Artículos relacionados ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Artículos relacionados ==\".decode('utf-8'))]\n if parsed_text.find(\"== Enlace externo ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Enlace externo ==\".decode('utf-8'))]\n if parsed_text.find(\"== Enlaces externos ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Enlaces externos ==\".decode('utf-8'))]\n parsed_text = parsed_text.replace('ABr)', '')\n return parsed_text", "def is_readable(self, content_type):\n return False", "def check_email_required(document_text):\n if \"visitor[email]\" in document_text:\n return True\n else:\n return False", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def _single_body(part):\n content_type = part.get_content_type()\n try:\n body = part.get_payload(decode=True)\n except Exception:\n return ''\n\n if content_type == 'text/html':\n return BeautifulSoup(body, 'html.parser').text\n elif content_type == 'text/plain':\n return body\n return ''", "def parse(self, content):\n pass", "def guess_type(content):\n global mimeLock\n global mimeInitialized\n\n if not mimeInitialized:\n with mimeLock:\n if not mimeInitialized:\n mimetypes.init()\n mimeInitialized = True\n guessed = mimetypes.guess_type(content)\n\n if guessed[1] is None:\n guessed = (guessed[0], \"\")\n\n return guessed", "def valid_xss_content_type(http_res):\n # When no content-type is returned, browsers try to display the HTML\n if \"content-type\" not in http_res.headers:\n return True\n\n # else only text/html will allow javascript (maybe text/plain will work for IE...)\n if \"text/html\" in http_res.headers[\"content-type\"]:\n return True\n return False", "def has_mixed_content(self) -> bool:\n raise NotImplementedError()", "def audio_content_filter(item):\n return item.media_content_type.startswith(\"audio/\")", "def _check_has_message(data):\r\n return re.match(r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'PRIVMSG #[a-zA-Z0-9_]+ :.+$', data)", "def _assertEmailParsedCorrectly(self, email, data):\n self.assertIsInstance(email, EmailMultiAlternatives)\n self.assertEqual(email.to, data.get('recipient', 
'').split(','))\n self.assertEqual(email.from_email, data.get('sender', ''))\n self.assertEqual(email.subject, data.get('subject', ''))\n self.assertEqual(email.body, \"%s\\n\\n%s\" % (\n data.get('stripped-text', ''),\n data.get('stripped-signature', '')\n ))\n self.assertEqual(email.cc, data.get('cc', '').split(','))\n self.assertEqual(email.bcc, data.get('bcc', '').split(','))\n if 'html' in data:\n self.assertEqual(len(email.alternatives), 1)\n self.assertEqual(email.alternatives[0][0], data.get('stripped-html', ''))", "def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def tweet_is_valid(status):\n if not status.has_key('text'):\n return False\n\n text = status['text'].encode('utf-8')\n pattern = re.compile('.*(^RT|https?.*|@|[Ll]onely\\s[Ii]sland)')\n if not re.match(pattern, text):\n return True\n else:\n return False", "def get_html_part(parts):\n for part in parts:\n if part[\"mimeType\"] == \"text/html\":\n return part[\"body\"][\"data\"]\n return \"\"", "def text_exists(self, text: str)-> bool:\n result = self.__content.find(text)\n if result == -1:\n return False\n else:\n return True", "def parse_email(filename):\n with open(filename, \"rb\") as email:\n data = email.read()\n if isinstance(data, bytes):\n content = BytesParser().parsebytes(data)\n else:\n content = Parser().parsestr(data)\n return content", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def payload_parse(self, mail):\n\t\tif mail.is_multipart():\n\t\t\tfor payload in mail.get_payload():\n\t\t\t\tif payload.get_content_maintype() == \"multipart\":\n\t\t\t\t\tself.payload_parse(payload)\n\t\t\t\telse:\n\t\t\t\t\tself.payload_handle(payload, mail)\n\t\t\t# Post deletion of payloads:\n\t\t\tself.payload_delete(mail)", "def parse_mime(self, mtype):\n parts = mtype.split(';')\n params = OrderedDict()\n\n # Split parameters and convert numeric values to a Decimal object.\n for k, v in [param.split('=', 1) for param in parts[1:]]:\n k = k.strip().lower()\n v = v.strip().strip('\\'\"')\n\n if self._parm_val_lower:\n v = v.lower()\n\n try:\n v = Decimal(v)\n except InvalidOperation:\n if k == 'q':\n v = Decimal(\"1.0\")\n\n params[k] = v\n\n # Add/fix quality values.\n quality = params.get('q')\n\n if ('q' not in params\n or quality > Decimal(\"1.0\")\n or quality < Decimal(\"0.0\")):\n params['q'] = Decimal(\"1.0\")\n\n full_type = parts[0].strip().lower()\n\n # Fix non-standard single asterisk.\n if full_type == '*':\n full_type = '*/*'\n\n type, sep, subtype = full_type.partition('/')\n\n if '+' in subtype:\n idx = subtype.rfind('+')\n suffix = subtype[idx+1:].strip()\n subtype = subtype[:idx]\n else:\n suffix = ''\n\n return type.strip(), subtype.strip(), suffix, params", "def isSpam(textLine):\n\treturn True", "def is_good_response(self, resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def isSupportedContent(cls, fileContent):\n magic = bytearray(fileContent)[:4]\n magics = (\n p('>I', 0xfeedface),\n p('>I', 0xfeedfacf),\n p('>I', 0xcafebabe),\n\n p('<I', 0xfeedface),\n p('<I', 0xfeedfacf),\n p('<I', 0xcafebabe),\n )\n return magic in magics", "def read(text):\n text = str(text)\n if not helpers.contains_only_phonetic_chars(text):\n raise NonSupportedTextException()\n return _process_replacements(text)", "def check_message(self, message: str) -> bool:\n processed_message = 
message.lower()\n\n for _, module in self.modules.get_modules():\n if not module.is_loaded:\n continue\n\n for _, reg_list in module.module_settings.templates.items():\n find_match = any(\n [re.findall(reg, processed_message) for reg in reg_list]\n )\n if find_match:\n return True\n return False", "def is_spam(code):\n urlfind = re.compile(\"[A-Za-z]+://.*?\")\n urls = urlfind.findall(code)\n url_count = len(urls)\n\n if url_count > URLS_LIMIT:\n return True\n else:\n word_count = len(code.split())\n urls_percentage = (url_count * 100) / word_count\n\n if urls_percentage > URLS_PERCENTAGE:\n return True\n else:\n return False", "def get_bare_file(filename):\n \"\"\" for a given entry, finds all of the info we want to display \"\"\"\n f = open(filename, 'r')\n str = f.read()\n str = str.decode('utf-8')\n e = {}\n try: e['title'] = re.search('(?<=title:)(.)*', str).group()\n except: pass\n try: e['slug'] = re.search('(?<=slug:)(.)*', str).group()\n except: pass\n try: e['summary'] = re.search('(?<=summary:)(.)*', str).group()\n except: pass\n try:\n e['content'] =re.search('(?<=content:)((?!category:)(?!published:)(.)|(\\n))*', str).group()\n if e['content'] == None:\n e['content'] = re.search('(?<=content:)((.)|(\\n))*$', str).group()\n except:\n pass\n try:\n e['published'] = re.search('(?<=published:)(.)*', str).group()\n except: pass\n try: e['author'] = re.search('(?<=author:)(.)*', str).group()\n except: pass\n try: e['category'] = re.search('(?<=category:)(.)*', str).group()\n except: pass\n try: e['url'] = re.search('(?<=url:)(.)*', str).group()\n except: pass\n try:\n e['uid'] = re.search('(?<=u-uid:)(.)*', str)\n if e['uid']:\n e['uid'] = e['uid'].group()\n else:\n e['uid'] = re.search('(?<=u-uid)(.)*', str).group()\n except: pass\n try: e['time-zone'] = re.search('(?<=time-zone:)(.)*', str).group()\n except: pass\n try: e['location'] = re.search('(?<=location:)(.)*', str).group()\n except: pass\n try: e['syndication'] = re.search('(?<=syndication:)(.)*', str).group()\n except: pass\n try: e['location_name'] = re.search('(?<=location-name:)(.)*', str).group()\n except: pass\n try: e['in_reply_to'] = re.search('(?<=in-reply-to:)(.)*', str).group()\n except:pass\n return e", "def mime_allow_empty(self) -> ConfigNodePropertyBoolean:\n return self._mime_allow_empty", "def octetparts(msg):\n return set(filter(lambda part:\n part.get_content_type() == 'application/octet-stream',\n msg.walk()))", "def check_text(resp, needle):\n\n if needle not in resp.text:\n raise MiteError(f\"Couldn't find \\\"{needle}\\\" in the response.\")" ]
[ "0.6001141", "0.5973282", "0.5906038", "0.5895969", "0.5820583", "0.5703178", "0.5675198", "0.5666455", "0.5633167", "0.55806506", "0.55011463", "0.5480535", "0.5429232", "0.53972876", "0.5385626", "0.5375632", "0.53742987", "0.53600854", "0.53416246", "0.5340039", "0.5295856", "0.52867484", "0.5284102", "0.5271993", "0.525556", "0.52521545", "0.5249628", "0.5248567", "0.52463645", "0.5242685", "0.52290356", "0.5228536", "0.52213514", "0.519574", "0.5191198", "0.51902795", "0.5187303", "0.5185752", "0.5168724", "0.5160549", "0.5156289", "0.51511383", "0.51502144", "0.5142549", "0.5142199", "0.5138681", "0.5133168", "0.51183486", "0.5113381", "0.5108853", "0.51083577", "0.5101811", "0.50796807", "0.5075393", "0.5068799", "0.50665706", "0.5059132", "0.50437313", "0.504012", "0.50265956", "0.5020815", "0.50162876", "0.501272", "0.5009992", "0.50020266", "0.4996466", "0.49905118", "0.49874687", "0.49845943", "0.49689707", "0.49651656", "0.49561617", "0.49538937", "0.4949789", "0.49494183", "0.49406397", "0.49367765", "0.4930899", "0.49278405", "0.492223", "0.49118474", "0.49048543", "0.49042624", "0.4903113", "0.48982677", "0.48978704", "0.4894801", "0.48894465", "0.4884987", "0.48848635", "0.4880139", "0.48789072", "0.48705402", "0.48666075", "0.48645335", "0.4858349", "0.4856377", "0.48524997", "0.4851462", "0.48455924" ]
0.57026833
6
Some dumps of hotmail messages introduce weird line breaks into header content, making them impossible to parse. This function will fix this content.
def fix_broken_hotmail_headers(text):
    end_of_header_match = _end_of_simple_header_pattern.search(text)
    temp_header_text = text[:end_of_header_match.end()].strip()
    lines = temp_header_text.splitlines()[1:]  # first line is not a header...
    fixed_header_lines = reduce(_merge_broken_header_lines, lines, [])
    return_text = os.linesep.join(fixed_header_lines) + text[end_of_header_match.end():]
    return return_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_broken_yahoo_headers(text):\n end_of_header_match = _end_of_multipart_header_pattern.search(text)\n temp_header_text = text[:end_of_header_match.end()].strip()\n lines = temp_header_text.splitlines()\n fixed_header_lines = reduce(_merge_broken_header_lines, lines, [])\n return_text = os.linesep.join(fixed_header_lines) + '\\r\\n\\r\\n' + text[end_of_header_match.end():]\n return return_text", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def normalizeRawFromHeader(value):\n return value.replace('\\n', '').replace('\\r', '').strip()", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def process_message(message):\r\n message = gensim.utils.to_unicode(message, 'latin1').strip()\r\n blocks = message.split(u'\\n\\n')\r\n # skip email headers (first block) and footer (last block)\r\n content = u'\\n\\n'.join(blocks[1:])\r\n return content", "def __prepare_content(self, content):\n if isinstance(content, str):\n content = content.encode('utf-8')\n return b\"{%d+}%s%s\" % (len(content), CRLF, content)", "def clean(row):\r\n for v in row:\r\n \tv = v.replace(\"\\xef\\xbb\\xbf\",\"\")\r\n return row", "def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')", "def clean_content(self) -> str:", "def process_content(self, content):\n content = re.sub('\\[\\d+\\]', '', content)\n content = re.sub('\\n+', '\\n', content)\n content = re.sub(r'Bài liên quan:.*\\n', '', content)\n return content.strip()", "def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")", "def cleaned_contents(self):\n snip_with_code = re.compile(\"(//.*snip(\\-file)*:?.*\\n)(\\+\\n)?(\\[.*\\]\\n)*----\\n(.*\\n)*?----\\n\", flags=re.IGNORECASE)\n cleaned = re.sub(snip_with_code, r'\\1', self.contents)\n return cleaned", "def fix_header(po):\r\n\r\n # By default, django-admin.py makemessages creates this header:\r\n #\r\n # SOME DESCRIPTIVE TITLE.\r\n # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER\r\n # This file is distributed under the same license as the PACKAGE package.\r\n # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\r\n\r\n po.metadata_is_fuzzy = [] # remove [u'fuzzy']\r\n header = po.header\r\n fixes = (\r\n ('SOME DESCRIPTIVE TITLE', 
EDX_MARKER),\r\n ('Translations template for PROJECT.', EDX_MARKER),\r\n ('YEAR', str(datetime.utcnow().year)),\r\n ('ORGANIZATION', 'edX'),\r\n (\"THE PACKAGE'S COPYRIGHT HOLDER\", \"EdX\"),\r\n (\r\n 'This file is distributed under the same license as the PROJECT project.',\r\n 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'\r\n ),\r\n (\r\n 'This file is distributed under the same license as the PACKAGE package.',\r\n 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'\r\n ),\r\n ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),\r\n )\r\n for src, dest in fixes:\r\n header = header.replace(src, dest)\r\n po.header = header", "def rfc822_escape(header):\n lines = header.split('\\n')\n sep = '\\n' + 8 * ' '\n return sep.join(lines)", "def cleanup_raw_data(buf):\n raw = str(buf, encoding='iso-8859-1').strip()\n records = raw.splitlines()\n return records", "def _force_CRLF(self, data):\n return CRLF_RE.sub('\\r\\n', data)", "def prepare_msg(raw_message):\n\n raw_message = str(raw_message)\n\n raw_message = raw_message.lower()\n raw_message = raw_message.replace(\"bismarkb1996\", \"\")\n raw_message = raw_message.replace(\"id336383265\", \"\")\n raw_message = re.sub('[^а-яА-Яa-zA-Z0-9\\\\s\\\\-]+', '', raw_message)\n\n split_message = raw_message.split(\" \")\n logger.debug(\"Split message: \" + str(split_message))\n\n message = []\n for msg in [x.split(\"-\") for x in split_message]:\n for i in msg:\n if i != \"\":\n message.append(i)\n\n return message", "def fix_page_content(filename, content):\n return JournalStaticPage(filename, content).body", "def _remove_new_line(self, message):\n if message.endswith('\\n'):\n return message[:-1]\n return message", "def _collapse_leading_ws(header, txt):\n if header.lower() == 'description': # preserve newlines\n return '\\n'.join([x[8:] if x.startswith(' ' * 8) else x\n for x in txt.strip().splitlines()])\n else:\n return ' '.join([x.strip() for x in txt.splitlines()])", "def normalize(self):\n self.header.set_length(self.body)", "def convert_content(contents):\n replacement = convert_timestamp(contents)\n replacement = convert_header(replacement)\n replacement = re.sub(r\"<c[.\\w\\d]*>\", \"\", replacement)\n replacement = re.sub(r\"</c>\", \"\", replacement)\n replacement = re.sub(r\"<\\d\\d:\\d\\d:\\d\\d.\\d\\d\\d>\", \"\", replacement)\n replacement = re.sub(r\"::[\\-\\w]+\\([\\-.\\w\\d]+\\)[ ]*{[.,:;\\(\\) \\-\\w\\d]+\\n }\\n\", \"\", replacement)\n replacement = re.sub(r\"Style:\\n##\\n\", \"\", replacement)\n replacement = add_sequence_numbers(replacement)\n return replacement", "def convertToCrLf(fileContent):\n if fileContent is None:\n return None\n fileContent = fileContent.replace( WIN_NEW_LINE,'\\n')\n fileContent = fileContent.replace('\\r','\\n')\n fileContent = fileContent.replace('\\n', WIN_NEW_LINE)\n return fileContent", "def _header_transformer(self, lines):\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line", "def prep_difflines(content):\n return [ x+\"\\n\" for x in content.split(\"\\n\") ]", "def clean_body(s):\n patbal = re.compile('<.*?>',flags = re.S)\n patspace = re.compile('\\W+',flags = re.S)\n return re.sub(patspace,' ',re.sub(patbal,'',s))", "def clean_body(s):\n patbal = re.compile('<.*?>',flags = re.S)\n patspace = re.compile('\\W+',flags = re.S)\n return 
re.sub(patspace,' ',re.sub(patbal,'',s))", "def convert_header(contents):\n replacement = re.sub(r\"WEBVTT\\n\", \"\", contents)\n replacement = re.sub(r\"Kind:[ \\-\\w]+\\n\", \"\", replacement)\n replacement = re.sub(r\"Language:[ \\-\\w]+\\n\", \"\", replacement)\n return replacement", "def cleanup(self, body):\n body = re.sub(\"&gt;\", \">\", body) # Recode HTML codes\n body = re.sub(\"&lt;\", \"<\", body)\n body = re.sub(\"&amp;\", \"&\", body)\n body = re.sub(\"&nbsp;\", \" \", body)\n # body = re.sub(\"^\\[deleted\\]$\", \"\", body) # [deleted] and [removed]: delete entire row from dataframe\n body = re.sub(\"http\\S+\", \" \", body) # Remove URL\n body = re.sub(\"/r/\\S+|/u/\\S+\", \" \", body) # Remove /r/subreddit, /u/user\n # body = re.sub(\"(>.*?\\\\n\\\\n)+\", \" \", body) # Remove quoted comments\n # body = re.sub(\"[[:cntrl:]]\", \" \", body) # Remove control characters (\\n, \\b) doesn't work for unicode\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"C\") # Remove control characters (\\n, \\b) etc.\n body = re.sub(\"'\", \"\", body) # Remove single quotation marks (contractions)\n # body = re.sub(\"[[:punct:]]\", \" \", body) # Remove punctuation\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"P\") # Remove punctuation\n body = re.sub(\"\\\\s+\", \" \", body) # Replace multiple spaces with single space\n body = body.strip()\n body = body.lower() # Lower case\n return body # Return body (cleaned up text)", "def __remove_break_line__(self, string):\n return string.rstrip()", "def prepare_content(content):\n \n result = []\n for line in content.splitlines():\n # turn urls into actual links\n match = find_url.match(line)\n if match:\n line = '{0}[{1}]({1}){2}'.format(match.group(1), match.group(2), match.group(3))\n\n # TBD: escape '|' character to protect table integrity.\n \n # correct heading levels (add two)\n if line.startswith('#') and line.endswith('#'):\n result.append('##{0}##'.format(unlines(line)))\n else:\n result.append(line)\n \n return '<br>'.join(result)", "def decode_header(header):\n new_header = {}\n\n for item in header:\n split = item.split('\\t')\n new_header[split[0].replace(':', '')] = split[1].replace(\"\\r\\n\", \"\")\n\n return new_header", "def break_long_headers(header):\n if len(header) > 160 and ',' in header:\n header = mark_safe('<br> ' + ', <br>'.join(header.split(',')))\n return header", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def clean_hanging_newline(t):\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t", "def no_blank_line_before_section(): # noqa: D416", "def pre_process_text_block(block):\n block['content'] = block['content'].strip()", "def parse_header(self):\n bodystart=re.compile(r\"<body>\", re.IGNORECASE).search(self.lines).span()[1]\n oldheader=self.lines[0:bodystart]\n start=re.compile(\"<title>\", re.IGNORECASE).search(oldheader).span()[1]\n finish=re.compile(\"</title>\", re.IGNORECASE).search(oldheader).span()[0]\n titles=oldheader[start:finish].split(\"--\")\n # Duplicate if needed\n if len(titles)==1: 
titles.append(titles[0])\n self.title, self.header= {}, {}\n for i, lang in enumerate(LANGS):\n self.title[lang]=titles[i]\n self.header[lang]=\"%s%s%s\" % (oldheader[:start], self.title[lang], oldheader[finish:],)", "def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line", "def test_normalize_linefeeds():\n text = \"\"\"show hostname\\r\nshow version\\r\\r\nshow inventory\\r\\r\\r\nshow interfaces\n\\r\"\"\"\n expected = \"\"\"show hostname\nshow version\nshow inventory\nshow interfaces\n\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\")\n result = connection.normalize_linefeeds(text)\n assert result == expected", "def clean_smile(self, smi):\n smi = smi.replace('\\n', '')\n return smi", "def clean_data(td):\n data = td.string\n try:\n return data.strip(\" \\n:-\")\n except AttributeError:\n return u\"\"", "def normalize_newlines(text):\n return re.sub(\"\\r\\n|\\r\", \"\\n\", text)", "def cleanup_newlines(string):\n return string.replace(\"\\r\\n\", \"\\r\").replace(\"\\n\", \"\\r\")", "def clean_message_md(self):\n message_md = self.cleaned_data[\"message_md\"]\n lines = filter(None, message_md.splitlines())\n message_md = \" \".join(lines)\n return message_md", "def fix_hppos(f):\n with open(f, 'r+') as file:\n d = file.readlines()\n file.seek(0)\n for i in d[:-1]: # Write all but last line\n file.write(i)\n l = d[-1].split(' ')\n if len(l) == 5: # If final line is complete, write it too\n file.write(d[-1])\n file.truncate() # Remove bad stuff", "def no_underline_and_no_newline(): # noqa: D416", "def return_text_without_headlines(text):\n\n text = text.replace('\\\\n', '\\n')\n text = text.replace('\\\\r', '\\r')\n text = re.sub('h1. (.*)\\r', '', text)\n text = re.sub('h2. (.*)\\r', '', text)\n text = re.sub('h2. (.*)', '', text)\n text = re.sub('h3. (.*)\\r', '', text)\n text = re.sub('h4. 
(.*)\\r', '', text)\n text = text.replace('*acceptance criteria:*', \"\")\n text = text.replace('*acceptance criteria*:', \"\")\n text = text.replace('*acceptance criteria*', \"\")\n text = text.replace('*story:*', \"\")\n text = text.replace('*story*:', \"\")\n text = text.replace('*story*', \"\")\n text = text.replace('*stories:*', \"\")\n text = text.replace('*questions:*', \"\")\n text = text.replace('*questions*:', \"\")\n text = text.replace('*questions*', \"\")\n text = text.replace('*implementation notes:*', \"\")\n text = text.replace('*implementation notes*:', \"\")\n text = text.replace('*implementation notes*', \"\")\n text = text.replace('*notes:*', \"\")\n text = text.replace('*notes*:', \"\")\n text = text.replace('*notes*', \"\")\n text = text.replace('*Acceptance Criteria:*', \"\")\n text = text.replace('*Acceptance Criteria*:', \"\")\n text = text.replace('*Acceptance Criteria*', \"\")\n text = text.replace('*Story:*', \"\")\n text = text.replace('*Story*:', \"\")\n text = text.replace('*Story*', \"\")\n text = text.replace('*Stories:*', \"\")\n text = text.replace('*Questions:*', \"\")\n text = text.replace('*Questions*:', \"\")\n text = text.replace('*Questions*', \"\")\n text = text.replace('*Implementation Notes:*', \"\")\n text = text.replace('*Implementation Notes*:', \"\")\n text = text.replace('*Implementation Notes*', \"\")\n text = text.replace('*Notes:*', \"\")\n text = text.replace('*Notes*:', \"\")\n text = text.replace('*Notes*', \"\")\n text = text.replace('*Acceptance criteria:*', \"\")\n text = text.replace('*Acceptance criteria*:', \"\")\n text = text.replace('*Acceptance criteria*', \"\")\n text = text.replace('*Implementation notes:*', \"\")\n text = text.replace('*Implementation notes*:', \"\")\n text = text.replace('*Implementation notes*', \"\")\n text = text.replace('*Acceptance Criteria:*', \"\")\n text = text.replace('*Acceptance Criteria*:', \"\")\n text = text.replace('*Acceptance Criteria*', \"\")\n text = text.replace('*Implementation Notes:*', \"\")\n text = text.replace('*Implementation Notes*:', \"\")\n text = text.replace('*Implementation Notes*', \"\")\n text = text.replace(':\\r\\n****', \" \")\n text = text.replace('\\r\\n****', \". \")\n text = text.replace(':\\n****', \" \")\n text = text.replace('\\n****', \". \")\n text = text.replace(':\\r\\n***', \" \")\n text = text.replace('\\r\\n***', \". \")\n text = text.replace(':\\n***', \" \")\n text = text.replace('\\n***', \". \")\n text = text.replace(':\\r\\n**', \" \")\n text = text.replace('\\r\\n**', \". \")\n text = text.replace(':\\n**', \" \")\n text = text.replace('\\n**', \". \")\n text = text.replace(':\\r\\n*', \" \")\n text = text.replace('\\r\\n*', \". \")\n text = text.replace(':\\n*', \" \")\n text = text.replace('\\n*', \". \")\n text = text.replace(':\\r\\n\\r\\n', \" \")\n text = text.replace('\\r\\n\\r\\n', \". \")\n text = text.replace(':\\r\\n', \" \")\n text = text.replace('\\r\\n', \". \")\n text = text.replace('.\\n', \". \")\n text = text.replace('\\n', \" \")\n text = text.replace('.\\r', \". 
\")\n text = text.replace('\\r', \" \")\n text = text.replace('\\\\n', '\\n')\n text = text.replace('\\\\t', '\\t')\n text = text.replace('\\\\r', '\\r')\n text = text.replace('\\n', \" \")\n text = text.replace('\\r', \" \")\n text = text.replace('\\t', \" \")\n text = ' '.join(text.split())\n return text", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! 
or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def clean_html(message):\n all_lines = []\n started_html = False\n finished_with_html_tag = False\n html_part = []\n for idx, line in enumerate(message.split(\"\\n\")):\n if re.search(r\"<.*?html.*?>\", line):\n started_html = True\n html_part.append(line)\n else:\n if started_html:\n html_part.append(line)\n else:\n all_lines.append(line)\n if \"</html>\" in line:\n finished_with_html_tag = True\n if finished_with_html_tag:\n all_lines.append(clean_text_from_html_tags(\"\\n\".join(html_part)))\n html_part = []\n finished_with_html_tag = False\n started_html = False\n if len(html_part) > 0:\n all_lines.extend(html_part)\n return delete_empty_lines(\"\\n\".join(all_lines))", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def _prepare_text(body):\n text = body.lower()\n text = text.replace('\\n', ' ')\n regex = re.compile('[^a-z ]')\n return regex.sub('', text)", "def fix_header(file_path):\n logging.warning(\"Couldn't open edf {}. 
Trying to fix the header ...\".format(file_path))\n f = open(file_path, 'rb')\n content = f.read()\n f.close()\n \n header = content[:256]\n # print(header)\n\n # version = header[:8].decode('ascii')\n # patient_id = header[8:88].decode('ascii')\n # [age] = re.findall(\"Age:(\\d+)\", patient_id)\n # [sex] = re.findall(\"\\s\\w\\s\", patient_id)\n\n recording_id = header[88:168].decode('ascii')\n # startdate = header[168:176]\n # starttime = header[176:184]\n # n_bytes_in_header = header[184:192].decode('ascii')\n # reserved = header[192:236].decode('ascii')\n # THIS IS MESSED UP IN THE HEADER DESCRIPTION\n # duration = header[236:244].decode('ascii')\n # n_data_records = header[244:252].decode('ascii')\n # n_signals = header[252:].decode('ascii')\n \n date = recording_id[10:21]\n day, month, year = date.split('-')\n if month == 'JAN':\n month = '01'\n\n elif month == 'FEB':\n month = '02'\n\n elif month == 'MAR':\n month = '03'\n\n elif month == 'APR':\n month = '04'\n\n elif month == 'MAY':\n month = '05'\n\n elif month == 'JUN':\n month = '06'\n\n elif month == 'JUL':\n month = '07'\n\n elif month == 'AUG':\n month = '08'\n\n elif month == 'SEP':\n month = '09'\n\n elif month == 'OCT':\n month = '10'\n\n elif month == 'NOV':\n month = '11'\n\n elif month == 'DEC':\n month = '12'\n\n year = year[-2:]\n date = '.'.join([day, month, year])\n \n fake_time = '00.00.00'\n \n # n_bytes = int(n_bytes_in_header) - 256\n # n_signals = int(n_bytes / 256)\n # n_signals = str(n_signals) + ' '\n # n_signals = n_signals[:4]\n \n # new_header = version + patient_id + recording_id + date + fake_time + n_bytes_in_header + reserved +\n # new_header += n_data_records + duration + n_signals\n # new_content = (bytes(new_header, encoding=\"ascii\") + content[256:])\n\n new_content = header[:168] + bytes(date + fake_time, encoding=\"ascii\") + header[184:] + content[256:]\n\n # f = open(file_path, 'wb')\n # f.write(new_content)\n # f.close()", "def contents_without_whitespace(self):\n return self.contents.replace(' ', '').replace('\\n', '')", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def fix_headers(filename):\n\n counter = 1\n\n for line in fileinput.input(filename, inplace = True):\n if '>' in line:\n line = line.replace(line, '>'+str(counter)+'\\n')\n counter += 1\n sys.stdout.write(line)", "def remove_extra_newlines(data, akn=True):\n if akn:\n data = data[1:]\n data = re.sub(r'\\n\\s*\\n', '\\n', data)\n return data", "def strip_whitespace_include_newline(source_string):\n return replace_by_dict(source_string,\n replace_dict_whitespace_include_newline)", "def clean_crlf(fpath):\n sub = path.basename(path.dirname(fpath))\n \n 
with open(fpath, 'rb') as f:\n raw_content = f.read()\n lfnull_content = raw_content.replace(b'\\r',b'')\n \n outpath = path.join('..','sourcedata','ds3','sub-'+sub,'sub-'+sub+'_task-all_beh.tsv')\n with open(outpath, 'w') as f:\n f.write(lfnull_content.decode(\"utf-8\"))\n\n return(pd.read_csv(outpath, delimiter='\\t'))", "def clean(self, line):\r\n m = self.RE.match(line)\r\n if line.strip() == \">\":\r\n return \"\"\r\n elif m:\r\n return m.group(2)\r\n else:\r\n return line", "def parse_header(self):", "def decode_field(field):\r\n field = field.replace('\\r\\n','')\r\n field = field.replace('\\n','')\r\n\r\n list = email.Header.decode_header (field)\r\n\r\n decoded = \" \".join([\"%s\" % k for (k,v) in list])\r\n\r\n #print \"Decoding [%s] to [%s]\" % (field, decoded)\r\n\r\n return decoded", "def parse_frontmatter_and_strip(self):\n assert self._raw_content\n raw_content = self._raw_content\n\n if raw_content.startswith('---'):\n raw_content = raw_content[3:]\n\n tridash_re = re.compile('^-{3,5}\\s*$', re.MULTILINE)\n m = tridash_re.search(raw_content)\n if m:\n start, end = m.span()\n # start is the 1st dash index\n # end is the index of '\\n' in the same line\n self.frontmatter = raw_content[:start]\n self.md = raw_content[end+1:]\n else:\n self.frontmatter = None\n self.md = raw_content\n if self.frontmatter:\n # strings in fm is unicode or ascii depending on whether\n # the object is an ascii string or not\n fm = yaml.load(self.frontmatter)\n else:\n fm = {}\n self.set_tags(fm)\n self.set_title(fm)\n self.set_category(fm)", "def _normalize_linefeeds(a_string):\n newline = re.compile(r'(\\r\\r\\n|\\r\\n|\\n\\r)')\n return newline.sub('\\n', a_string).replace('\\n\\n', '\\n')", "def _fcm_prepare_payload():\n body = 'adfnasdf'\n body = re.sub(ur'<a(.*?)>', r'<a>', body) \n \n payload = {\n \"author_name\":'Nguyen Thai Nam',\n \"model\":'mail.chanel',\n \"res_id\": 1,\n \"db_id\": '2ba3b9ba-b69b-11e6-a8fa-000c29f674d1',\n 'subject': 'Nguyen Thai Nam',\n 'body':html2text.html2text(body),\n }\n \n return payload", "def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z", "def modify(s):\n\tl = s.splitlines(True)\n\ttry:\n\t\tif l[1] in ('\\r\\n', '\\n', '</br>'):\n\t\t\tl = everyother(l)\n\texcept IndexError:\n\t\tpass\n\ttry:\n\t\tif l[-1] not in ('\\r\\n', '\\n', '</br>'):\n\t\t\tl.append('\\r\\n')\n\texcept IndexError:\n\t\tpass\n\treturn ''.join(l)", "def _unmunge_multiline_jinja2(lines):\n start_slug = \"# {# \" + JINJA2_ML_SLUG\n start = len(start_slug)\n stop = len(\" #}\\n\")\n new_lines = []\n for line in lines:\n if line.startswith(start_slug):\n new_lines.append(line[start:-stop] + \"\\n\")\n else:\n new_lines.append(line)\n return new_lines", "def _strip_excerpt(self, raw_html):\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")", "def fix_horizontal(line):\n\tline = line.rstrip()\n\tline = untabify(line, tab_width)\n\treturn line + '\\n'", "def _fix_l1b_header(filename):\n try:\n # First try it with the astropy .to_string() method, as this is the easiest.\n hdr = fits.getheader(filename)\n hdr_str = hdr.tostring()\n except Exception:\n # Read the file manually as bytes until we hit a UnicodeDecodeError, i.e.\n # until we reach the data part. 
Since astropy version 4.2.1, we can't use\n # the .to_string() method anymore because of FITS header consistency checks\n # that cannot be overridden, and they won't fix it unfortunately. If the\n # input file is a .gz file, we need to unpack it first to the tmp directory.\n temp_dir = tempfile.gettempdir()\n name = Path(filename).name\n is_gz_file = False\n if name.endswith(\".gz\"):\n is_gz_file = True\n with gzip.open(filename, \"r\") as gfile:\n filename = str(Path(temp_dir) / name[:-3])\n with open(filename, \"wb\") as file_out:\n file_out.write(gfile.read())\n hdr_str = \"\"\n with open(filename, \"rb\") as file:\n counter = 1\n while True:\n try:\n this_line = file.read(counter)\n this_str = this_line.decode(\"utf-8\")\n hdr_str += this_str\n counter += 1\n except UnicodeDecodeError:\n break\n if is_gz_file:\n os.remove(filename)\n # Make a list of strings with a length of 80\n hdr_list = [hdr_str[i : i + 80] for i in range(0, len(hdr_str), 80)]\n # Remove all the empty entries\n while \" \" * 80 in hdr_list:\n hdr_list.remove(\" \" * 80)\n hdr_list_new = []\n for count, item in enumerate(hdr_list):\n if count <= len(hdr_list) - 2:\n if (\n hdr_list[count][0:8] != \"CONTINUE\"\n and hdr_list[count + 1][0:8] != \"CONTINUE\"\n ):\n hdr_list_new.append(hdr_list[count])\n else:\n if (\n hdr_list[count][0:8] != \"CONTINUE\"\n and hdr_list[count + 1][0:8] == \"CONTINUE\"\n ):\n ampersand_pos = hdr_list[count].find(\"&\")\n if ampersand_pos != -1:\n new_entry = hdr_list[count][0:ampersand_pos]\n else:\n raise RuntimeError(\n \"There should be an ampersand at the end of a CONTINUE'd keyword.\"\n )\n tmp_count = 1\n while hdr_list[count + tmp_count][0:8] == \"CONTINUE\":\n ampersand_pos = hdr_list[count + tmp_count].find(\"&\")\n if ampersand_pos != -1:\n first_sq_pos = hdr_list[count + tmp_count].find(\"'\")\n if first_sq_pos != -1:\n new_entry = (\n new_entry\n + hdr_list[count + tmp_count][\n first_sq_pos + 1 : ampersand_pos\n ]\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Did not find any.\"\n )\n else:\n # If there is no ampersand at the end anymore, it means the entry ends here.\n # Read from the first to the second single quote in this case.\n first_sq_pos = hdr_list[count + tmp_count].find(\"'\")\n if first_sq_pos != -1:\n second_sq_pos = hdr_list[count + tmp_count][\n first_sq_pos + 1 :\n ].find(\"'\")\n if second_sq_pos != -1:\n new_entry = (\n new_entry\n + hdr_list[count + tmp_count][\n first_sq_pos\n + 1 : second_sq_pos\n + 1\n + first_sq_pos\n ].rstrip()\n + \"'\"\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Found the first, but not the second.\"\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Did not find any.\"\n )\n tmp_count += 1\n hdr_list_new.append(new_entry)\n else:\n continue\n else:\n # Add END at the end of the header\n hdr_list_new.append(hdr_list[count])\n # Now we stitch together the CONTINUE information correctly,\n # with a \"\\n\" at the end that we use as a separator later on\n # when we convert from a string to an astropy header.\n for count, item in enumerate(hdr_list_new):\n if len(item) > 80:\n this_entry = item[0:78] + \"&'\\n\"\n rest = \"CONTINUE '\" + item[78:]\n while len(rest) > 80:\n this_entry = this_entry + rest[0:78] + \"&'\\n\"\n rest = \"CONTINUE '\" + rest[78:]\n this_entry = this_entry + rest\n hdr_list_new[count] = this_entry\n # Now we should have the correct list of strings. 
Since we can't convert a list to a\n # FITS header directly, we have to convert it to a string first, separated by \"\\n\".\n hdr_str_new = \"\\n\".join([str(item) for item in hdr_list_new])\n hdr_corr = fits.Header.fromstring(hdr_str_new, sep=\"\\n\")\n return hdr_corr", "def remove_extra_newlines(self):\n self.value = re.sub(self.patterns['extra_newlines'], '\\n', self.value)\n return self", "def strip_whitespace(data):\n if isinstance(data, bytes):\n return re.sub(br'\\s+', b'', data)\n else:\n return re.sub(r'\\s+', '', data)", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def clean(self, line):\n m = self.RE.match(line)\n if line.strip() == \">\":\n return \"\"\n elif m:\n return m.group(2)\n else:\n return line", "def parse_header(self, out):\n self.headers = {}\n for h in out.split(\"\\r\\n\\r\\n\", 1)[0].split(\"\\r\\n\"):\n x = h.split(\":\")\n self.headers[x[0]] = \":\".join(x[1:]).lstrip()\n return True", "def clean_data(self):\n for line in self.file:\n if line.startswith('//') or line.isspace():\n continue\n if '//' in line:\n line = line.split('//')[0]\n line = line.replace('\\n', '')\n line = line.replace(' ','')\n self.commands.append(line)", "def norm_html_from_html(html):\n if not isinstance(html, unicode):\n html = html.decode('utf-8')\n html = _markdown_email_link_re.sub(\n _markdown_email_link_sub, html)\n if sys.platform == \"win32\":\n html = html.replace('\\r\\n', '\\n')\n return html", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def treat_bom(sHtml):\n\n # Check if it is in the beginning\n bStartsWithBom = sHtml.startswith(u'\\ufeff')\n # Remove everywhere\n sHtml = sHtml.replace(u'\\ufeff', '')\n # Return what we have\n return sHtml", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def small_preprocess(data):\r\n \r\n # Remove new line characters\r\n data = [re.sub('\\s+', ' ', sent) for sent in data]\r\n # Remove distracting single quotes\r\n data = [re.sub(\"\\'\", \"\", sent) for sent in data]\r\n\r\n return data", "def wrap_lines(msg: str) -> str:\n lines = msg.splitlines()\n fixed_l = []\n\n for line in lines:\n fixed_l.append(textwrap.fill(\n line,\n 80,\n break_long_words=False,\n break_on_hyphens=False))\n\n return 
'\\n'.join(fixed_l)", "def getContents(self):\n normal_body_regex = re.compile(r'[ \\n\\r\\t]+')\n return normal_body_regex.sub(' ', self.contents)", "def stripText(self, rawText):\n strippedText = []\n for line in rawText:\n if line.rstrip():\n if line[0] != '#':\n strippedText.append(line.rstrip()) #also remove newline character\n return strippedText", "def beautify(text):\n text = re.sub('\\n{3,}', '\\n\\n', text)\n text = re.sub('\\n+$', '\\n', text)\n return text", "def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])", "def pretty(self):\n\t\tif not self.content.endswith('\\n'):\n\t\t\tself.content += '\\n'", "def ignorableWhitespace(self, data):\n pass", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def _trunc_lines(self):\n\t\tif self._appendMessages:\n\t\t\tself._trunc_lines_append()\n\t\telse:\n\t\t\tself._trunc_lines_prepend()", "def chomp(self, payload):\n s_id, payload = payload.split('\\n', 1)\n if (s_id + '\\n') != self.identifier:\n raise DeserializationError(\"Buffer does not start with identifier:{}\".format(self.identifier))\n return payload", "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "def strip_gutenberg_header(input_book_lines):\n\tlines = input_book_lines\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\treturn lines[curr_line+1:]", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def test_asciitable_m_special_chars_in_header(self):\n input = '''\n+----------+------------+-----------+----------------+-------+--------------------+\n| Protocol | Address | Age (min) | Hardware Addr | Type | Interface |\n| | | of int | | | |\n+----------+------------+-----------+----------------+-------+--------------------+\n| Internet | 10.12.13.1 | 98 | 0950.5785.5cd1 | ARPA | FastEthernet2.13 |\n+----------+------------+-----------+----------------+-------+--------------------+\n '''\n expected = [\n {\n \"protocol\": \"Internet\",\n \"address\": \"10.12.13.1\",\n \"age_min_of_int\": \"98\",\n \"hardware_addr\": \"0950.5785.5cd1\",\n \"type\": \"ARPA\",\n \"interface\": \"FastEthernet2.13\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def missing_colon_google_style_section(): # noqa: D406, D407" ]
[ "0.6554932", "0.6418198", "0.64063436", "0.6094776", "0.60202503", "0.5935732", "0.58566016", "0.58263063", "0.57753146", "0.57245463", "0.56043744", "0.5570992", "0.55153716", "0.55127054", "0.5502749", "0.54770464", "0.5456795", "0.5437268", "0.53645223", "0.5352466", "0.5350329", "0.5307941", "0.5281954", "0.5278494", "0.5277922", "0.5275608", "0.5268338", "0.52614194", "0.52614194", "0.52552664", "0.5253965", "0.52457494", "0.5233711", "0.5233651", "0.52256954", "0.52194685", "0.52127075", "0.5207752", "0.5185552", "0.51742387", "0.51697963", "0.5160043", "0.5152414", "0.512217", "0.5118866", "0.51184094", "0.51081705", "0.51068014", "0.5103392", "0.5090349", "0.5089297", "0.50781196", "0.50633013", "0.50476277", "0.5042138", "0.5028887", "0.50258565", "0.5020389", "0.5004667", "0.5001172", "0.5001075", "0.497018", "0.49608117", "0.4949626", "0.4945049", "0.4942476", "0.49404263", "0.49393988", "0.49353045", "0.4933254", "0.49284798", "0.49276257", "0.49256605", "0.49234003", "0.49048123", "0.49029994", "0.4900299", "0.48967892", "0.48917022", "0.48855808", "0.48802555", "0.48773384", "0.48769024", "0.48769024", "0.48700452", "0.48694813", "0.48624593", "0.4855363", "0.48511627", "0.48510292", "0.48478287", "0.48466536", "0.48364502", "0.48352233", "0.4822315", "0.48197854", "0.48186874", "0.48152658", "0.48075047", "0.47982252" ]
0.73432523
0
Some dumps of yahoo messages introduce weird line breaks into header content, making them impossible to parse. This function will fix this content.
def fix_broken_yahoo_headers(text):
    end_of_header_match = _end_of_multipart_header_pattern.search(text)
    temp_header_text = text[:end_of_header_match.end()].strip()
    lines = temp_header_text.splitlines()
    fixed_header_lines = reduce(_merge_broken_header_lines, lines, [])
    return_text = os.linesep.join(fixed_header_lines) + '\r\n\r\n' + text[end_of_header_match.end():]
    return return_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_broken_hotmail_headers(text):\n end_of_header_match = _end_of_simple_header_pattern.search(text)\n temp_header_text = text[:end_of_header_match.end()].strip()\n lines = temp_header_text.splitlines()[1:] # first line is not a header...\n fixed_header_lines = reduce(_merge_broken_header_lines, lines, [])\n return_text = os.linesep.join(fixed_header_lines) + text[end_of_header_match.end():]\n return return_text", "def normalizeRawFromHeader(value):\n return value.replace('\\n', '').replace('\\r', '').strip()", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def cleanup_raw_data(buf):\n raw = str(buf, encoding='iso-8859-1').strip()\n records = raw.splitlines()\n return records", "def clean(row):\r\n for v in row:\r\n \tv = v.replace(\"\\xef\\xbb\\xbf\",\"\")\r\n return row", "def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')", "def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")", "def __prepare_content(self, content):\n if isinstance(content, str):\n content = content.encode('utf-8')\n return b\"{%d+}%s%s\" % (len(content), CRLF, content)", "def process_content(self, content):\n content = re.sub('\\[\\d+\\]', '', content)\n content = re.sub('\\n+', '\\n', content)\n content = re.sub(r'Bài liên quan:.*\\n', '', content)\n return content.strip()", "def clean_content(self) -> str:", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)", "def process_message(message):\r\n message = gensim.utils.to_unicode(message, 'latin1').strip()\r\n blocks = message.split(u'\\n\\n')\r\n # skip email headers (first block) and footer (last block)\r\n content = u'\\n\\n'.join(blocks[1:])\r\n return content", "def test_normalize_linefeeds():\n text = \"\"\"show hostname\\r\nshow version\\r\\r\nshow inventory\\r\\r\\r\nshow interfaces\n\\r\"\"\"\n expected = \"\"\"show hostname\nshow version\nshow inventory\nshow interfaces\n\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\")\n result = connection.normalize_linefeeds(text)\n assert result == expected", "def parse_header(self):\n bodystart=re.compile(r\"<body>\", re.IGNORECASE).search(self.lines).span()[1]\n oldheader=self.lines[0:bodystart]\n start=re.compile(\"<title>\", re.IGNORECASE).search(oldheader).span()[1]\n finish=re.compile(\"</title>\", 
re.IGNORECASE).search(oldheader).span()[0]\n titles=oldheader[start:finish].split(\"--\")\n # Duplicate if needed\n if len(titles)==1: titles.append(titles[0])\n self.title, self.header= {}, {}\n for i, lang in enumerate(LANGS):\n self.title[lang]=titles[i]\n self.header[lang]=\"%s%s%s\" % (oldheader[:start], self.title[lang], oldheader[finish:],)", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def _remove_new_line(self, message):\n if message.endswith('\\n'):\n return message[:-1]\n return message", "def cleanup(self, body):\n body = re.sub(\"&gt;\", \">\", body) # Recode HTML codes\n body = re.sub(\"&lt;\", \"<\", body)\n body = re.sub(\"&amp;\", \"&\", body)\n body = re.sub(\"&nbsp;\", \" \", body)\n # body = re.sub(\"^\\[deleted\\]$\", \"\", body) # [deleted] and [removed]: delete entire row from dataframe\n body = re.sub(\"http\\S+\", \" \", body) # Remove URL\n body = re.sub(\"/r/\\S+|/u/\\S+\", \" \", body) # Remove /r/subreddit, /u/user\n # body = re.sub(\"(>.*?\\\\n\\\\n)+\", \" \", body) # Remove quoted comments\n # body = re.sub(\"[[:cntrl:]]\", \" \", body) # Remove control characters (\\n, \\b) doesn't work for unicode\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"C\") # Remove control characters (\\n, \\b) etc.\n body = re.sub(\"'\", \"\", body) # Remove single quotation marks (contractions)\n # body = re.sub(\"[[:punct:]]\", \" \", body) # Remove punctuation\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"P\") # Remove punctuation\n body = re.sub(\"\\\\s+\", \" \", body) # Replace multiple spaces with single space\n body = body.strip()\n body = body.lower() # Lower case\n return body # Return body (cleaned up text)", "def fix_big_encoded_urls(message):\n try:\n new_message = urllib.parse.unquote(message)\n except: # noqa\n pass\n if new_message != message:\n return re.sub(r\"[\\(\\)\\{\\}#%]\", \" \", new_message)\n return message", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def prepare_msg(raw_message):\n\n raw_message = str(raw_message)\n\n raw_message = raw_message.lower()\n raw_message = raw_message.replace(\"bismarkb1996\", \"\")\n raw_message = raw_message.replace(\"id336383265\", \"\")\n raw_message = re.sub('[^а-яА-Яa-zA-Z0-9\\\\s\\\\-]+', '', raw_message)\n\n split_message = raw_message.split(\" \")\n logger.debug(\"Split message: \" + str(split_message))\n\n message = []\n for msg in [x.split(\"-\") for x in split_message]:\n for i in msg:\n if i != \"\":\n message.append(i)\n\n return message", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def normalize(self):\n self.header.set_length(self.body)", "def clean_data(td):\n data = td.string\n try:\n return data.strip(\" \\n:-\")\n except AttributeError:\n return u\"\"", "def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line", "def cleaned_contents(self):\n snip_with_code = re.compile(\"(//.*snip(\\-file)*:?.*\\n)(\\+\\n)?(\\[.*\\]\\n)*----\\n(.*\\n)*?----\\n\", flags=re.IGNORECASE)\n 
cleaned = re.sub(snip_with_code, r'\\1', self.contents)\n return cleaned", "def _clean_data(self, stn, ignore_errors):\n # Confirm that we got some data, and confirm that it's the\n # expected web page by checking the <title> tag contents\n if (not self.data) | (not self.data[0].startswith(\n '<TITLE>Generate WXP 24-Hour Meteogram</TITLE>')):\n if ignore_errors:\n sys.stderr.write('Invalid data returned for '\n '%4i-%02i-%02i\\n'\n % (self.year, self.month, self.day))\n self.data = ''\n return\n else:\n raise UnexpectedPageError\n # Get rid of the <title> and <pre> tag lines\n self.data = self.data[2:]\n # Confirm that data is available for the specified date\n if self.data[0].startswith('No data were found for date'):\n if ignore_errors:\n sys.stderr.write('%4i-%02i-%02i data missing\\n'\n % (self.year, self.month, self.day))\n self.data = ''\n return\n else:\n raise UnexpectedPageError\n #Get rid of the station location and following blank line\n self.data = self.data[2:]\n # Confirm that we got the data for the expected station by\n # checking the \"METAR Data for\" line contents\n if not self.data[0].startswith(\n ' '.join((\"METAR Data for\", stn))):\n if ignore_errors:\n sys.stderr.write('%4i-%02i-%02i data missing '\n 'or incorrect station returned\\n'\n % (self.year, self.month, self.day))\n self.data = ''\n return\n else:\n raise UnexpectedPageError\n # Get rid of the \"METAR Data for\" line and following blank\n # line\n self.data = self.data[2:]\n # Date part of timestamp for each line of data\n datestamp = '%4i %02i %02i' % (self.year, self.month, self.day)\n # Clean up each line\n i = 0\n try:\n while True:\n # Continuations from the previous line start with 5\n # spaces\n if self.data[i].startswith(' '*5):\n # Concatenate continuation to previous line\n self.data[i-1] = ' '.join((self.data[i-1][:-1],\n self.data[i][5:]))\n # Get rid of continuation text that we just consumed\n self.data.pop(i)\n # Get rid of file parse error lines\n if self.data[i].startswith('SFC_parse_file:'):\n self.data.pop(i)\n continue\n # Get rid of SPECI prefix\n if self.data[i].startswith('SPECI'):\n self.data[i] = self.data[i][6:]\n fields = self.data[i].split()\n # Add METAR prefix if it's missing\n if fields[0] != 'METAR':\n fields.insert(0, 'METAR')\n self.data[i] = ' '.join(('METAR', self.data[i]))\n # Add hour to timestamp, and prepend timestamp to line\n self.data[i] = ' '.join((datestamp, fields[2][2:4],\n self.data[i]))\n # Get rid of duplicate data lines\n if self.data[i] == self.data[i-1]:\n self.data.pop(i)\n continue\n i += 1\n except IndexError:\n # No more data lines\n pass", "def fix_page_content(filename, content):\n return JournalStaticPage(filename, content).body", "def clean_smile(self, smi):\n smi = smi.replace('\\n', '')\n return smi", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n 
return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def break_long_headers(header):\n if len(header) > 160 and ',' in header:\n header = mark_safe('<br> ' + ', <br>'.join(header.split(',')))\n return header", "def _force_CRLF(self, data):\n return CRLF_RE.sub('\\r\\n', data)", "def treat_bom(sHtml):\n\n # Check if it is in the beginning\n bStartsWithBom = sHtml.startswith(u'\\ufeff')\n # Remove everywhere\n sHtml = sHtml.replace(u'\\ufeff', '')\n # Return what we have\n return sHtml", "def no_underline_and_no_newline(): # noqa: D416", "def stripText(self, rawText):\n strippedText = []\n for line in rawText:\n if line.rstrip():\n if line[0] != '#':\n strippedText.append(line.rstrip()) #also remove newline character\n return strippedText", "def parse_data(self, data):\n output=[]\n for entry in data:\n output.append(entry.replace('\\r','').replace('\\n',''))\n return output", "def _fix_l1b_header(filename):\n try:\n # First try it with the astropy .to_string() method, as this is the easiest.\n hdr = fits.getheader(filename)\n hdr_str = hdr.tostring()\n except Exception:\n # Read the file manually as bytes until we hit a UnicodeDecodeError, i.e.\n # until we reach the data part. Since astropy version 4.2.1, we can't use\n # the .to_string() method anymore because of FITS header consistency checks\n # that cannot be overridden, and they won't fix it unfortunately. If the\n # input file is a .gz file, we need to unpack it first to the tmp directory.\n temp_dir = tempfile.gettempdir()\n name = Path(filename).name\n is_gz_file = False\n if name.endswith(\".gz\"):\n is_gz_file = True\n with gzip.open(filename, \"r\") as gfile:\n filename = str(Path(temp_dir) / name[:-3])\n with open(filename, \"wb\") as file_out:\n file_out.write(gfile.read())\n hdr_str = \"\"\n with open(filename, \"rb\") as file:\n counter = 1\n while True:\n try:\n this_line = file.read(counter)\n this_str = this_line.decode(\"utf-8\")\n hdr_str += this_str\n counter += 1\n except UnicodeDecodeError:\n break\n if is_gz_file:\n os.remove(filename)\n # Make a list of strings with a length of 80\n hdr_list = [hdr_str[i : i + 80] for i in range(0, len(hdr_str), 80)]\n # Remove all the empty entries\n while \" \" * 80 in hdr_list:\n hdr_list.remove(\" \" * 80)\n hdr_list_new = []\n for count, item in enumerate(hdr_list):\n if count <= len(hdr_list) - 2:\n if (\n hdr_list[count][0:8] != \"CONTINUE\"\n and hdr_list[count + 1][0:8] != \"CONTINUE\"\n ):\n hdr_list_new.append(hdr_list[count])\n else:\n if (\n hdr_list[count][0:8] != \"CONTINUE\"\n and hdr_list[count + 1][0:8] == \"CONTINUE\"\n ):\n ampersand_pos = hdr_list[count].find(\"&\")\n if ampersand_pos != -1:\n new_entry = hdr_list[count][0:ampersand_pos]\n else:\n raise RuntimeError(\n \"There should be an ampersand at the end of a CONTINUE'd keyword.\"\n )\n tmp_count = 1\n while hdr_list[count + tmp_count][0:8] == \"CONTINUE\":\n ampersand_pos = hdr_list[count + tmp_count].find(\"&\")\n if ampersand_pos != -1:\n first_sq_pos = hdr_list[count + tmp_count].find(\"'\")\n if first_sq_pos != -1:\n new_entry = (\n new_entry\n + hdr_list[count + tmp_count][\n first_sq_pos + 1 : ampersand_pos\n ]\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. 
Did not find any.\"\n )\n else:\n # If there is no ampersand at the end anymore, it means the entry ends here.\n # Read from the first to the second single quote in this case.\n first_sq_pos = hdr_list[count + tmp_count].find(\"'\")\n if first_sq_pos != -1:\n second_sq_pos = hdr_list[count + tmp_count][\n first_sq_pos + 1 :\n ].find(\"'\")\n if second_sq_pos != -1:\n new_entry = (\n new_entry\n + hdr_list[count + tmp_count][\n first_sq_pos\n + 1 : second_sq_pos\n + 1\n + first_sq_pos\n ].rstrip()\n + \"'\"\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Found the first, but not the second.\"\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Did not find any.\"\n )\n tmp_count += 1\n hdr_list_new.append(new_entry)\n else:\n continue\n else:\n # Add END at the end of the header\n hdr_list_new.append(hdr_list[count])\n # Now we stitch together the CONTINUE information correctly,\n # with a \"\\n\" at the end that we use as a separator later on\n # when we convert from a string to an astropy header.\n for count, item in enumerate(hdr_list_new):\n if len(item) > 80:\n this_entry = item[0:78] + \"&'\\n\"\n rest = \"CONTINUE '\" + item[78:]\n while len(rest) > 80:\n this_entry = this_entry + rest[0:78] + \"&'\\n\"\n rest = \"CONTINUE '\" + rest[78:]\n this_entry = this_entry + rest\n hdr_list_new[count] = this_entry\n # Now we should have the correct list of strings. Since we can't convert a list to a\n # FITS header directly, we have to convert it to a string first, separated by \"\\n\".\n hdr_str_new = \"\\n\".join([str(item) for item in hdr_list_new])\n hdr_corr = fits.Header.fromstring(hdr_str_new, sep=\"\\n\")\n return hdr_corr", "def prepare_content(content):\n \n result = []\n for line in content.splitlines():\n # turn urls into actual links\n match = find_url.match(line)\n if match:\n line = '{0}[{1}]({1}){2}'.format(match.group(1), match.group(2), match.group(3))\n\n # TBD: escape '|' character to protect table integrity.\n \n # correct heading levels (add two)\n if line.startswith('#') and line.endswith('#'):\n result.append('##{0}##'.format(unlines(line)))\n else:\n result.append(line)\n \n return '<br>'.join(result)", "def parse_header(self):", "def _collapse_leading_ws(header, txt):\n if header.lower() == 'description': # preserve newlines\n return '\\n'.join([x[8:] if x.startswith(' ' * 8) else x\n for x in txt.strip().splitlines()])\n else:\n return ' '.join([x.strip() for x in txt.splitlines()])", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def rfc822_escape(header):\n lines = header.split('\\n')\n sep = '\\n' + 8 * ' '\n return 
sep.join(lines)", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def cleanup_newlines(string):\n return string.replace(\"\\r\\n\", \"\\r\").replace(\"\\n\", \"\\r\")", "def clean_body(s):\n patbal = re.compile('<.*?>',flags = re.S)\n patspace = re.compile('\\W+',flags = re.S)\n return re.sub(patspace,' ',re.sub(patbal,'',s))", "def clean_body(s):\n patbal = re.compile('<.*?>',flags = re.S)\n patspace = re.compile('\\W+',flags = re.S)\n return re.sub(patspace,' ',re.sub(patbal,'',s))", "def return_text_without_headlines(text):\n\n text = text.replace('\\\\n', '\\n')\n text = text.replace('\\\\r', '\\r')\n text = re.sub('h1. (.*)\\r', '', text)\n text = re.sub('h2. (.*)\\r', '', text)\n text = re.sub('h2. (.*)', '', text)\n text = re.sub('h3. (.*)\\r', '', text)\n text = re.sub('h4. (.*)\\r', '', text)\n text = text.replace('*acceptance criteria:*', \"\")\n text = text.replace('*acceptance criteria*:', \"\")\n text = text.replace('*acceptance criteria*', \"\")\n text = text.replace('*story:*', \"\")\n text = text.replace('*story*:', \"\")\n text = text.replace('*story*', \"\")\n text = text.replace('*stories:*', \"\")\n text = text.replace('*questions:*', \"\")\n text = text.replace('*questions*:', \"\")\n text = text.replace('*questions*', \"\")\n text = text.replace('*implementation notes:*', \"\")\n text = text.replace('*implementation notes*:', \"\")\n text = text.replace('*implementation notes*', \"\")\n text = text.replace('*notes:*', \"\")\n text = text.replace('*notes*:', \"\")\n text = text.replace('*notes*', \"\")\n text = text.replace('*Acceptance Criteria:*', \"\")\n text = text.replace('*Acceptance Criteria*:', \"\")\n text = text.replace('*Acceptance Criteria*', \"\")\n text = text.replace('*Story:*', \"\")\n text = text.replace('*Story*:', \"\")\n text = text.replace('*Story*', \"\")\n text = text.replace('*Stories:*', \"\")\n text = text.replace('*Questions:*', \"\")\n text = text.replace('*Questions*:', \"\")\n text = text.replace('*Questions*', \"\")\n text = text.replace('*Implementation Notes:*', \"\")\n text = text.replace('*Implementation Notes*:', \"\")\n text = text.replace('*Implementation Notes*', \"\")\n text = text.replace('*Notes:*', \"\")\n text = text.replace('*Notes*:', \"\")\n text = text.replace('*Notes*', \"\")\n text = text.replace('*Acceptance criteria:*', \"\")\n text = text.replace('*Acceptance criteria*:', \"\")\n text = text.replace('*Acceptance criteria*', \"\")\n text = text.replace('*Implementation notes:*', \"\")\n text = text.replace('*Implementation notes*:', \"\")\n text = text.replace('*Implementation notes*', \"\")\n text = text.replace('*Acceptance Criteria:*', \"\")\n text = text.replace('*Acceptance Criteria*:', \"\")\n text = text.replace('*Acceptance Criteria*', \"\")\n text = text.replace('*Implementation Notes:*', \"\")\n text = text.replace('*Implementation Notes*:', \"\")\n text = text.replace('*Implementation Notes*', \"\")\n text = text.replace(':\\r\\n****', \" \")\n text = text.replace('\\r\\n****', \". 
\")\n text = text.replace(':\\n****', \" \")\n text = text.replace('\\n****', \". \")\n text = text.replace(':\\r\\n***', \" \")\n text = text.replace('\\r\\n***', \". \")\n text = text.replace(':\\n***', \" \")\n text = text.replace('\\n***', \". \")\n text = text.replace(':\\r\\n**', \" \")\n text = text.replace('\\r\\n**', \". \")\n text = text.replace(':\\n**', \" \")\n text = text.replace('\\n**', \". \")\n text = text.replace(':\\r\\n*', \" \")\n text = text.replace('\\r\\n*', \". \")\n text = text.replace(':\\n*', \" \")\n text = text.replace('\\n*', \". \")\n text = text.replace(':\\r\\n\\r\\n', \" \")\n text = text.replace('\\r\\n\\r\\n', \". \")\n text = text.replace(':\\r\\n', \" \")\n text = text.replace('\\r\\n', \". \")\n text = text.replace('.\\n', \". \")\n text = text.replace('\\n', \" \")\n text = text.replace('.\\r', \". \")\n text = text.replace('\\r', \" \")\n text = text.replace('\\\\n', '\\n')\n text = text.replace('\\\\t', '\\t')\n text = text.replace('\\\\r', '\\r')\n text = text.replace('\\n', \" \")\n text = text.replace('\\r', \" \")\n text = text.replace('\\t', \" \")\n text = ' '.join(text.split())\n return text", "def remove_extra_newlines(data, akn=True):\n if akn:\n data = data[1:]\n data = re.sub(r'\\n\\s*\\n', '\\n', data)\n return data", "def fix_hppos(f):\n with open(f, 'r+') as file:\n d = file.readlines()\n file.seek(0)\n for i in d[:-1]: # Write all but last line\n file.write(i)\n l = d[-1].split(' ')\n if len(l) == 5: # If final line is complete, write it too\n file.write(d[-1])\n file.truncate() # Remove bad stuff", "def fix_header(po):\r\n\r\n # By default, django-admin.py makemessages creates this header:\r\n #\r\n # SOME DESCRIPTIVE TITLE.\r\n # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER\r\n # This file is distributed under the same license as the PACKAGE package.\r\n # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\r\n\r\n po.metadata_is_fuzzy = [] # remove [u'fuzzy']\r\n header = po.header\r\n fixes = (\r\n ('SOME DESCRIPTIVE TITLE', EDX_MARKER),\r\n ('Translations template for PROJECT.', EDX_MARKER),\r\n ('YEAR', str(datetime.utcnow().year)),\r\n ('ORGANIZATION', 'edX'),\r\n (\"THE PACKAGE'S COPYRIGHT HOLDER\", \"EdX\"),\r\n (\r\n 'This file is distributed under the same license as the PROJECT project.',\r\n 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'\r\n ),\r\n (\r\n 'This file is distributed under the same license as the PACKAGE package.',\r\n 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'\r\n ),\r\n ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),\r\n )\r\n for src, dest in fixes:\r\n header = header.replace(src, dest)\r\n po.header = header", "def clean_hanging_newline(t):\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t", "def decode_content(raw_content):\n return raw_content", "def sanitize_reply_buffer(self): \n for i in self.async_reply_buffer:\n\n if not i.endswith('\\n'):\n \n i = self.async_reply_buffer.index(i)\n temp = self.async_reply_buffer\n #with suppress(IndexError):\n if i+1 == len(temp):\n return 'SANFAIL'\n if i < len(temp):\n #print(i)\n #print(len(temp))\n #print(temp)\n #print(temp[i])\n #print(temp[i+1])\n temp[i] = temp[i] + temp[i+1]\n temp.pop(i+1)\n self.async_reply_buffer = temp\n\n\n #print(self.async_reply_buffer)", "def clean_data(self):\n for line in self.file:\n if line.startswith('//') or line.isspace():\n continue\n if '//' in line:\n line = line.split('//')[0]\n line = line.replace('\\n', '')\n line = line.replace(' ','')\n 
self.commands.append(line)", "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "def test_raw_feed(self):\n self.assertEqual(self.feed.feed.raw[:6].decode('utf-8'), \"<?xml \")", "def strip_fileendings(fName):\n with open(fName, 'rb') as f:\n data = f.readlines()\n\n data_new = [d.replace(\"\\r\\n\", \"\\n\") for d in data]\n\n with open(fName, 'wb') as f:\n for row in data_new:\n f.write(row)", "def prep_difflines(content):\n return [ x+\"\\n\" for x in content.split(\"\\n\") ]", "def _header_transformer(self, lines):\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line", "def convert_header(contents):\n replacement = re.sub(r\"WEBVTT\\n\", \"\", contents)\n replacement = re.sub(r\"Kind:[ \\-\\w]+\\n\", \"\", replacement)\n replacement = re.sub(r\"Language:[ \\-\\w]+\\n\", \"\", replacement)\n return replacement", "def modify(s):\n\tl = s.splitlines(True)\n\ttry:\n\t\tif l[1] in ('\\r\\n', '\\n', '</br>'):\n\t\t\tl = everyother(l)\n\texcept IndexError:\n\t\tpass\n\ttry:\n\t\tif l[-1] not in ('\\r\\n', '\\n', '</br>'):\n\t\t\tl.append('\\r\\n')\n\texcept IndexError:\n\t\tpass\n\treturn ''.join(l)", "def mangle_response(self, response):\n body = response.get_body()\n\n for regex, string in self._manglers['s']['b']:\n body = regex.sub(string, body)\n\n response.set_body(body)\n\n header_string = str(response.get_headers())\n\n for regex, string in self._manglers['s']['h']:\n header_string = regex.sub(string, header_string)\n\n try:\n mangled_header = Headers.from_string(header_string)\n except ValueError:\n error = 'Your header modifications created an invalid header'\\\n ' string that could NOT be parsed back to a Header object.'\n om.out.error(error)\n else:\n response.set_headers(mangled_header)\n\n if self._user_option_fix_content_len:\n response = self._fix_content_len(response)\n\n return response", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def __remove_break_line__(self, string):\n return string.rstrip()", "def end_headers(self):\n self.push_to_wire(\"\\r\\n\")", "def remove_extra_newlines(self):\n self.value = re.sub(self.patterns['extra_newlines'], '\\n', self.value)\n return self", "def contents_without_whitespace(self):\n return self.contents.replace(' ', '').replace('\\n', '')", "def test_parse_hand(self):\n\n hand_lines = [\n '\"-- starting hand #6 (No Limit Texas Hold\\'em) (dealer: \"\"Eddy KGB @ _7OU6FzFZP\"\")'\n ' --\",2020-12-17T00:44:19.590Z,160816585959100',\n '\"Player stacks: #1 \"\"MOP @ jwf61y3XJg\"\" (1060) | #4 \"\"rus @ PjBYO_8gbf\"\" (971) |'\n ' #6 \"\"Eddy KGB @ _7OU6FzFZP\"\" (1025) | #7 \"\"Ben @ eSbnubU-KP\"\" (1057) | #8'\n ' \"\"Max @ izsy1Zibpi\"\" (887)\",2020-12-17T00:44:19.590Z,160816585959101',\n '\"Your hand is Q♠, 3♠\",2020-12-17T00:44:19.590Z,160816585959105',\n '\"\"\"Ben @ eSbnubU-KP\"\" posts a small blind of 5\",'\n 
\"2020-12-17T00:44:19.590Z,160816585959107\",\n '\"\"\"Max @ izsy1Zibpi\"\" posts a big blind of 10\",'\n \"2020-12-17T00:44:19.590Z,160816585959108\",\n '\"\"\"MOP @ jwf61y3XJg\"\" folds\",2020-12-17T00:44:22.437Z,160816586243800',\n '\"\"\"rus @ PjBYO_8gbf\"\" calls 10\",2020-12-17T00:44:25.141Z,160816586514100',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" calls 10\",2020-12-17T00:44:28.601Z,160816586860200',\n '\"\"\"Ben @ eSbnubU-KP\"\" calls 10\",2020-12-17T00:44:31.296Z,160816587129700',\n '\"\"\"Max @ izsy1Zibpi\"\" checks\",2020-12-17T00:44:32.791Z,160816587279100',\n '\"flop: [J♠, 10♥, 6♥]\",2020-12-17T00:44:33.595Z,160816587359600',\n '\"\"\"Ben @ eSbnubU-KP\"\" checks\",2020-12-17T00:44:40.619Z,160816588062000',\n '\"\"\"Max @ izsy1Zibpi\"\" checks\",2020-12-17T00:44:41.477Z,160816588147800',\n '\"\"\"rus @ PjBYO_8gbf\"\" checks\",2020-12-17T00:44:44.131Z,160816588413200',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" checks\",2020-12-17T00:44:46.017Z,160816588601700',\n '\"turn: J♠, 10♥, 6♥ [Q♦]\",2020-12-17T00:44:46.823Z,160816588682400',\n '\"\"\"Ben @ eSbnubU-KP\"\" checks\",2020-12-17T00:44:50.123Z,160816589012400',\n '\"\"\"Max @ izsy1Zibpi\"\" checks\",2020-12-17T00:44:57.859Z,160816589786000',\n '\"\"\"rus @ PjBYO_8gbf\"\" checks\",2020-12-17T00:44:59.202Z,160816589920300',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" checks\",2020-12-17T00:45:01.677Z,160816590167800',\n '\"river: J♠, 10♥, 6♥, Q♦ [3♣]\",2020-12-17T00:45:02.499Z,160816590250400',\n '\"\"\"Ben @ eSbnubU-KP\"\" bets 30\",2020-12-17T00:45:08.970Z,160816590897100',\n '\"\"\"Max @ izsy1Zibpi\"\" calls 30\",2020-12-17T00:45:10.705Z,160816591070600',\n '\"\"\"rus @ PjBYO_8gbf\"\" calls 30\",2020-12-17T00:45:25.416Z,160816592541700',\n '\"\"\"Eddy KGB @ _7OU6FzFZP\"\" folds\",2020-12-17T00:45:26.287Z,160816592628700',\n '\"\"\"Ben @ eSbnubU-KP\"\" shows a Q♠, 3♠.\",2020-12-17T00:45:27.095Z,160816592709700',\n '\"\"\"Ben @ eSbnubU-KP\"\" collected 130 from pot with Two Pair, Q\\'s & 3\\'s'\n ' (combination: Q♠, Q♦, 3♠, 3♣, J♠)\",2020-12-17T00:45:27.095Z,160816592709701',\n '\"-- ending hand #6 --\",2020-12-17T00:45:27.095Z,160816592709702',\n ]\n\n expected_hand = Hand(\n id_=6,\n players={\n Player(name=\"Ben\", id_=\"eSbnubU-KP\"),\n Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\"),\n Player(name=\"MOP\", id_=\"jwf61y3XJg\"),\n Player(name=\"Max\", id_=\"izsy1Zibpi\"),\n Player(name=\"rus\", id_=\"PjBYO_8gbf\"),\n },\n stacks={\n Player(name=\"MOP\", id_=\"jwf61y3XJg\"): 1060,\n Player(name=\"rus\", id_=\"PjBYO_8gbf\"): 971,\n Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\"): 1025,\n Player(name=\"Ben\", id_=\"eSbnubU-KP\"): 1057,\n Player(name=\"Max\", id_=\"izsy1Zibpi\"): 887,\n },\n our_cards=(Card.from_string(\"Q♠\"), Card.from_string(\"3♠\")),\n preflop=Street(\n actions=[\n Post(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=5),\n Post(player=Player(name=\"Max\", id_=\"izsy1Zibpi\"), amount=10),\n Fold(player=Player(name=\"MOP\", id_=\"jwf61y3XJg\")),\n Call(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\"), amount=10),\n Call(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\"), amount=10),\n Call(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=10),\n Check(player=Player(name=\"Max\", id_=\"izsy1Zibpi\")),\n ]\n ),\n flop=[\n Card.from_string(\"J♠\"),\n Card.from_string(\"10♥\"),\n Card.from_string(\"6♥\"),\n ],\n first=Street(\n actions=[\n Check(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\")),\n Check(player=Player(name=\"Max\", id_=\"izsy1Zibpi\")),\n Check(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\")),\n 
Check(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\")),\n ]\n ),\n turn=[Card.from_string(\"Q♦\")],\n second=Street(\n actions=[\n Check(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\")),\n Check(player=Player(name=\"Max\", id_=\"izsy1Zibpi\")),\n Check(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\")),\n Check(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\")),\n ]\n ),\n river=[Card.from_string(\"3♣\")],\n third=Street(\n actions=[\n Bet(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=30),\n Call(player=Player(name=\"Max\", id_=\"izsy1Zibpi\"), amount=30),\n Call(player=Player(name=\"rus\", id_=\"PjBYO_8gbf\"), amount=30),\n Fold(player=Player(name=\"Eddy KGB\", id_=\"_7OU6FzFZP\")),\n Show(\n player=Player(\"Ben\", id_=\"eSbnubU-KP\"),\n cards=(Card.from_string(\"Q♠\"), Card.from_string(\"3♠\")),\n ),\n Collect(player=Player(name=\"Ben\", id_=\"eSbnubU-KP\"), amount=130),\n ]\n ),\n )\n\n actual_hand = parser.parse_hand(hand_lines=hand_lines)\n\n self.assertCountEqual(actual_hand.players, expected_hand.players)\n self.assertEqual(actual_hand.stacks, expected_hand.stacks)\n self.assertEqual(actual_hand.preflop, expected_hand.preflop)\n self.assertEqual(actual_hand.flop, expected_hand.flop)\n self.assertEqual(actual_hand.first, expected_hand.first)\n self.assertEqual(actual_hand.turn, expected_hand.turn)\n self.assertEqual(actual_hand.second, expected_hand.second)\n self.assertEqual(actual_hand.river, expected_hand.river)\n self.assertEqual(actual_hand.third, expected_hand.third)", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? 
but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def test_fetchParserIncompleteStringEndsInWhitespace(self):\n p = imap4._FetchParser()\n self.assertRaises(Exception, p.parseString, b\"BODY[HEADER.FIELDS \")", "def convert_content(contents):\n replacement = convert_timestamp(contents)\n replacement = convert_header(replacement)\n replacement = re.sub(r\"<c[.\\w\\d]*>\", \"\", replacement)\n replacement = re.sub(r\"</c>\", \"\", replacement)\n replacement = re.sub(r\"<\\d\\d:\\d\\d:\\d\\d.\\d\\d\\d>\", \"\", replacement)\n replacement = re.sub(r\"::[\\-\\w]+\\([\\-.\\w\\d]+\\)[ ]*{[.,:;\\(\\) \\-\\w\\d]+\\n }\\n\", \"\", replacement)\n replacement = re.sub(r\"Style:\\n##\\n\", \"\", replacement)\n replacement = add_sequence_numbers(replacement)\n return replacement", "def strip_symbol_from_msgs(oChecker):\n\n dNewMsgs = {}\n for sKey, tData in oChecker.msgs.items():\n dNewMsgs[sKey] = (tData[0], tData[2])\n # Monkey patch the checker\n oChecker.msgs = dNewMsgs", "def decode_header(header):\n new_header = {}\n\n for item in header:\n split = item.split('\\t')\n new_header[split[0].replace(':', '')] = split[1].replace(\"\\r\\n\", \"\")\n\n return new_header", "def fix_corrupted_description():\n portal = api.portal.get()\n request = portal.REQUEST\n context = request['PARENTS'][0]\n if not getattr(context, \"description\", False):\n return\n\n context.description.raw = \"\"\n context.description.original_encoding = \"ascii\"\n transaction.commit()", "def clean_robot_error(err_lines):\n # strip the meaningless header\n if len(err_lines) > 2 and err_lines[0].startswith(\"===\"):\n err_lines = err_lines[3:]\n # strip the meaningless footer\n if len(err_lines) > 2 and err_lines[-2].startswith(\"===\"):\n err_lines = err_lines[:-2]\n\n return {\"ename\": \"\", \"evalue\": \"\", \"traceback\": err_lines}", "def pre_process_text_block(block):\n block['content'] = block['content'].strip()", "def clean_crlf(fpath):\n sub = path.basename(path.dirname(fpath))\n \n with open(fpath, 'rb') as f:\n raw_content = f.read()\n lfnull_content = 
raw_content.replace(b'\\r',b'')\n \n outpath = path.join('..','sourcedata','ds3','sub-'+sub,'sub-'+sub+'_task-all_beh.tsv')\n with open(outpath, 'w') as f:\n f.write(lfnull_content.decode(\"utf-8\"))\n\n return(pd.read_csv(outpath, delimiter='\\t'))", "def convertToCrLf(fileContent):\n if fileContent is None:\n return None\n fileContent = fileContent.replace( WIN_NEW_LINE,'\\n')\n fileContent = fileContent.replace('\\r','\\n')\n fileContent = fileContent.replace('\\n', WIN_NEW_LINE)\n return fileContent", "def emit_line(handle, data):\n if len(data) > 254:\n if \" PAGE \" in data:\n data = data.replace(\"Record Group Title\", \"Record Group\")\n data = data.replace(\"Series Title\", \"Series\")\n data = data.replace(\n \"Washington, D.C.; Washington, D.C.;\", \"Washington, D.C.;\"\n )\n if len(data) > 254:\n logging.error(\"Truncating invalid line length: %s\", data)\n data = \"{0}\\n\".format(data[:254])\n handle.write(data)", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def _strip_excerpt(self, raw_html):\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")", "def ignorableWhitespace(self, data):\n pass", "def test_bad_encoding(self, app, data_queues):\n body = b'{\"comment\": \"R\\xe9sum\\xe9 from 1990\", \"items\": []}'\n assert \"Résumé\" in body.decode(\"iso8859-1\")\n with pytest.raises(UnicodeDecodeError):\n body.decode(\"utf-8\")\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n res = self._call(app, body=body, headers=headers, method=\"post\", status=400)\n detail = (\n \"'utf-8' codec can't decode byte 0xe9 in position 14: invalid\"\n \" continuation byte\"\n )\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})", "def test_fetchParserExpectedWhitespace(self):\n p = imap4._FetchParser()\n self.assertRaises(Exception, p.parseString, b\"BODY[HEADER.FIELDS!]\")", "def handleContentChunk(data):", "def openssl_config_strip(data):\n result = []\n for line in data.split(\"\\n\"):\n work_line = line.strip()\n work_line = re.sub(\"([^#]*)#.*?$\", \"\\\\1\", work_line)\n if len(work_line) > 0:\n result.append(work_line)\n return \"\\n\".join(result)", "def header(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC + \"0m \" +self.A220 + self.A220 + self.A220 + self.A220 + self.A220 +\" \" + self.ESC + \"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A220+self.A220+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A220+self.A220+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mS\"+self.ESC+\"0;31mAGA\"+self.ESC+\"37m \"+self.A219+self.A219+self.ESC+\"30;47mo\"+self.ESC+\"37;40m\"+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mO\"+self.ESC+\"0;31mF THE\"+self.ESC+\"37m \"+self.A219+self.A219+self.ESC+\"30;47mO\"+self.ESC+\"37;40m\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.A220+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0m 
\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mR\"+self.ESC+\"0;31mED\"+self.ESC+\"37m \"+self.A219+self.ESC+\"30;47mo\"+self.ESC+\"37;40m\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.A223+self.A219+self.ESC+\"1;47m\"+self.A176+self.A219+self.A219+self.A219+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m \"+self.ESC+\"1mD\"+self.ESC+\"0;31mRAGON 0.9.9\"+self.ESC+\"37m \"+self.A223+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.A219+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"0m \"+self.A219+self.ESC+\"1;47m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m concept\"+self.ESC+\"37m \"+self.A223+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"0m \"+self.ESC+\"1m\"+self.A220+self.ESC+\"0m \"+self.ESC+\"1m\"+self.A220+self.ESC+\"0m \"+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"1;41m\"+self.A176+self.A178+\" \"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m Seth Robinson \"+self.ESC+\"37m\"+self.A222+\" \"+self.A223+self.A223+self.ESC+\"1;47m\"+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A223+self.A223+self.ESC+\"0m \"+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+self.A223+\" \"+self.ESC+\"0m\"+self.A223+self.A219+self.A220+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m by\"+self.ESC+\"0m \"+self.A219+\" \"+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A220+self.A220+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0m \"+self.A223+self.ESC+\"1;47m\"+self.A176+self.A219+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A178+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A176+\" \"+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1;34m J\"+self.ESC+\"0;34m.\"+self.ESC+\"1mT\"+self.ESC+\"0;34m.\"+self.ESC+\"1mS\"+self.ESC+\"0;34mage\"+self.ESC+\"0m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.A220+self.ESC+\"1;47m\"+self.A177+self.A176+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A220+self.A220+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0m \"+self.A223+self.ESC+\"1;47m\"+self.A177+self.A219+self.A219+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A176+self.A178+\" 
\"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"1;41m\"+self.A176+self.A178+self.A176+self.A176+self.A177+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.A219+\" \"+self.ESC+\"1;47m\"+self.A176+self.A177+self.A219+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"41m\"+self.A223+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+\" \"+self.A220+self.A220+self.A220+self.A220+self.A223+self.A220+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A177+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A178+\" \"+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A178+self.A178+self.A176+self.A177+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.A219+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+\" \"+self.A176+self.A176+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A178+self.A178+self.A219+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.A176+self.A177+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+self.ESC+\"37m \"+self.ESC+\"31m\"+self.A178+self.A177+self.A177+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A177+self.A178+self.A219+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A219+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"1;41m\"+self.A178+\" \"+self.A177+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A178+self.A177+self.A219+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.A223+self.ESC+\"1;41m\"+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A178+self.A178+self.A177+self.A176+self.A223+\" \"+self.A220+self.A220+\" \"+self.A223+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m 
\"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A221+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A223+self.A223+self.A223+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A178+self.A177+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A177+self.A219+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.A178+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.A223+self.A223+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A178+self.A178+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.A223+self.A178+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+\" \"+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.ESC+\"1;30;47m\"+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.A219+self.ESC+\"1;41m\"+self.A176+self.A176+self.ESC+\"0;31m\"+self.A221+\" \"+self.ESC+\"1;5;32m\"+self.A220+self.A220+self.A223+\" \"+self.ESC+\"0;31;41m \"+self.ESC+\"40m\"+self.A178+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A220+self.A220+self.A220+self.A220+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.A177+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;5;32m\"+self.A219+self.A219+self.A223+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.A219+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A178+self.A223+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A219+self.A178+self.ESC+\"0;37;40m 
\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"1;5;32m\"+self.A223+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.A219+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A223+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A177+self.A219+self.A178+self.ESC+\"37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+self.ESC+\"0;31m\"+self.A221+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+\" \"+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+self.A219+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A176+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A177+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.A221+\" \"+self.ESC+\"31;41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A177+self.A177+self.A176+self.A178+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A220+self.A220+\" \"+self.ESC+\"1;41m\"+self.A178+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0m\"+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A178+self.A178+self.A177+self.A176+self.A176+self.A177+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.A178+\" \"+self.ESC+\"1;41m\"+self.A177+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A176+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A223+self.ESC+\"1m\"+self.A219+self.A220+\" \"+self.ESC+\"0;31;41m 
\"+self.A177+self.A178+self.A176+self.A176+\" \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A176+self.A178+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A223+self.ESC+\"1;47m\"+self.A223+self.ESC+\"40m\"+self.A219+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A178+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;37;47m\"+self.A222+self.ESC+\"40m\"+self.A221+self.A223+self.A220+\" \"+self.ESC+\"0;31m\"+self.A177+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A221+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+\" \"+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A223+self.ESC+\"31m\"+self.A222+self.ESC+\"1;41m\"+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;37;47m\"+self.A222+self.ESC+\"40m\"+self.A221+\" \"+self.ESC+\"47m\"+self.A222+self.ESC+\"40m\"+self.A221+\" \"+self.ESC+\"0;31m\"+self.A178+self.A177+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"33m\"+self.A220+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A220+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+\" \"+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"31;41m \"+self.A176+self.A176+self.ESC+\"37;40m \"+self.A220+self.ESC+\"1m\"+self.A219+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A219+self.A221+\" \"+self.A223+\" \"+self.A223+self.A220+\" \"+self.ESC+\"0;31;41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A178+self.A177+self.A176+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A220+self.ESC+\"1m\"+self.A219+\" \"+self.A219+self.A221+\" \"+self.A223+\" \"+self.A220+\" \"+self.A223+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"1;33;43m\"+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m 
\"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A219+self.A220+self.A222+self.ESC+\"1;41m\"+self.A219+self.A177+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.A178+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A223+self.ESC+\"1m\"+self.A220+\" \"+self.A223+\" \"+self.A220+self.A223+self.A220+\" \"+self.A223+\" \"+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"1;33m\"+self.A223+self.ESC+\"43m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0;33m\"+self.A220+self.A219+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A177+self.A178+\" \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+self.A178+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"30mÙ\"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A223+self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.A223+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+\" \"+self.A219+\" \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"31m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.A176+self.A176+\" \"+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.ESC+\"1;33;43m\"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A177+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.ESC+\"1;33;43m\"+self.A219+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.A220+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" 
\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.A220+self.A220+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.A223+\" \"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A177+\" \"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A178+self.ESC+\"0;33m\"+self.A220+self.A219+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A177+self.A177+self.A176+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A223+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A178+\" \"+self.A177+\" \"+self.A176+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A219+self.A178+self.A178+self.A177+self.A177+self.A176+\" \"+self.A176+\" \"+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"0;37m \"+self.ESC+\"1;33m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+\" \"+self.ESC+\"30;41m \"+self.ESC+\"1;31mShatterstar [W/X] \"+self.ESC+\"0;37;40m \"+self.ESC+\"30;41m \"+self.ESC+\"37;40m \"+self.ESC+\"30;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg", "def setUp(self):\n self.single_rfh2_message = open(\n os.path.join(self.messages_dir, \"single_rfh2.dat\"), \"rb\").read()\n 
self.single_rfh2_message_not_well_formed = \\\n self.single_rfh2_message[0:117] + self.single_rfh2_message[121:]", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def handle_upstream_response(self, raw: bytes) -> bytes:\n return raw # pragma: no cover", "def _trunc_lines(self):\n\t\tif self._appendMessages:\n\t\t\tself._trunc_lines_append()\n\t\telse:\n\t\t\tself._trunc_lines_prepend()", "def test_parse_simple_quote_with_newline(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a newline (0x0a) character\")):\n api.parse_quote(\" Quote with \\n character - Author\", simple_format=True)", "def test_stripFormatting(self):\n self.assertEqual(\n irc.stripFormatting(\n irc.assembleFormattedText(\n A.bold[\n A.underline[\n A.reverseVideo[A.fg.red[A.bg.green[\"hello\"]]], \" world\"\n ]\n ]\n )\n ),\n \"hello world\",\n )", "def _process_content_codings(self, chunk):\n content_codings = self.parsed_headers.get('content-encoding', [])\n content_codings.reverse()\n for coding in content_codings:\n # TODO: deflate support\n if coding in ['gzip', 'x-gzip'] and self._decode_ok:\n if not self._in_gzip_body:\n self._gzip_header_buffer += chunk\n try:\n chunk = self._read_gzip_header(\n self._gzip_header_buffer\n )\n self._in_gzip_body = True\n except IndexError:\n return '' # not a full header yet\n except IOError, gzip_error:\n self.add_note('header-content-encoding',\n rs.BAD_GZIP,\n gzip_error=str(gzip_error)\n )\n self._decode_ok = False\n return\n try:\n chunk = self._gzip_processor.decompress(chunk)\n except zlib.error, zlib_error:\n self.add_note(\n 'header-content-encoding', \n rs.BAD_ZLIB,\n zlib_error=str(zlib_error),\n ok_zlib_len=f_num(self.payload_sample[-1][0]),\n chunk_sample=chunk[:20].encode('string_escape')\n )\n self._decode_ok = False\n return\n else:\n # we can't handle other codecs, so punt on body processing.\n self._decode_ok = False\n return\n self._md5_post_processor.update(chunk)\n self.decoded_len += len(chunk)\n return chunk", "def fix_header(file_path):\n logging.warning(\"Couldn't open edf {}. 
Trying to fix the header ...\".format(file_path))\n f = open(file_path, 'rb')\n content = f.read()\n f.close()\n \n header = content[:256]\n # print(header)\n\n # version = header[:8].decode('ascii')\n # patient_id = header[8:88].decode('ascii')\n # [age] = re.findall(\"Age:(\\d+)\", patient_id)\n # [sex] = re.findall(\"\\s\\w\\s\", patient_id)\n\n recording_id = header[88:168].decode('ascii')\n # startdate = header[168:176]\n # starttime = header[176:184]\n # n_bytes_in_header = header[184:192].decode('ascii')\n # reserved = header[192:236].decode('ascii')\n # THIS IS MESSED UP IN THE HEADER DESCRIPTION\n # duration = header[236:244].decode('ascii')\n # n_data_records = header[244:252].decode('ascii')\n # n_signals = header[252:].decode('ascii')\n \n date = recording_id[10:21]\n day, month, year = date.split('-')\n if month == 'JAN':\n month = '01'\n\n elif month == 'FEB':\n month = '02'\n\n elif month == 'MAR':\n month = '03'\n\n elif month == 'APR':\n month = '04'\n\n elif month == 'MAY':\n month = '05'\n\n elif month == 'JUN':\n month = '06'\n\n elif month == 'JUL':\n month = '07'\n\n elif month == 'AUG':\n month = '08'\n\n elif month == 'SEP':\n month = '09'\n\n elif month == 'OCT':\n month = '10'\n\n elif month == 'NOV':\n month = '11'\n\n elif month == 'DEC':\n month = '12'\n\n year = year[-2:]\n date = '.'.join([day, month, year])\n \n fake_time = '00.00.00'\n \n # n_bytes = int(n_bytes_in_header) - 256\n # n_signals = int(n_bytes / 256)\n # n_signals = str(n_signals) + ' '\n # n_signals = n_signals[:4]\n \n # new_header = version + patient_id + recording_id + date + fake_time + n_bytes_in_header + reserved +\n # new_header += n_data_records + duration + n_signals\n # new_content = (bytes(new_header, encoding=\"ascii\") + content[256:])\n\n new_content = header[:168] + bytes(date + fake_time, encoding=\"ascii\") + header[184:] + content[256:]\n\n # f = open(file_path, 'wb')\n # f.write(new_content)\n # f.close()", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def body(self, response):\t\n\t\tx = response.xpath(\"//div[@class='story-content row-fluid']/p/text()\").extract()\n\n\t\tfor i in range(0,len(x)):\n\t\t\tx[i] = x[i].strip(\"\\r\\n\\t\")\n\t\treturn x", "def pretty(self):\n\t\tif not self.content.endswith('\\n'):\n\t\t\tself.content += '\\n'", "def strip_gutenberg_header(input_book_lines):\n\tlines = input_book_lines\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\treturn lines[curr_line+1:]", "def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines" ]
[ "0.64710176", "0.6009694", "0.5937977", "0.5803022", "0.57353634", "0.5620867", "0.5585157", "0.55775535", "0.55016285", "0.5496441", "0.54666466", "0.54155815", "0.53363466", "0.52728117", "0.52605987", "0.52031696", "0.5202499", "0.51547736", "0.5134256", "0.5130388", "0.5126378", "0.5089266", "0.5068171", "0.50627154", "0.5053572", "0.5046354", "0.50364953", "0.5035132", "0.5024572", "0.500912", "0.49903056", "0.49889123", "0.49523383", "0.49399292", "0.49330732", "0.4917593", "0.49069473", "0.49029866", "0.4900731", "0.49002475", "0.4878623", "0.48784146", "0.48604864", "0.4860037", "0.48567605", "0.48567605", "0.4845342", "0.4843683", "0.48411596", "0.4838722", "0.4838248", "0.48299146", "0.48273614", "0.48227438", "0.48212203", "0.48151758", "0.48151004", "0.48150906", "0.48133737", "0.48106816", "0.48063087", "0.48054188", "0.48027745", "0.47993407", "0.47988525", "0.47982135", "0.47865742", "0.47865656", "0.4783317", "0.4779409", "0.47694784", "0.47688255", "0.47681913", "0.4767389", "0.476407", "0.47638026", "0.4762306", "0.4759576", "0.4757024", "0.47545645", "0.47490975", "0.47431368", "0.4707552", "0.47066808", "0.4706579", "0.4706234", "0.46975887", "0.4689656", "0.4688782", "0.46844393", "0.46812537", "0.46777907", "0.46776575", "0.4672877", "0.46680465", "0.46667185", "0.46618757", "0.46590713", "0.4658331", "0.4648485" ]
0.7440384
0
Returns a single message object from a list of text content and attachments in a MIME message, after filtering out unwanted content. Also handles nested content like forwarded messages.
def get_nested_payload(mime_message):\n    return_message = EmailMessage()\n    return_message.subject = mime_message.get('Subject')\n    return_message.sender = clean_sender(mime_message.get('From'))\n    return_message.recipient = clean_recipient(mime_message.get('To'))\n    return_message.date = parse(mime_message.get('Date'))\n    for sub_message in mime_message.walk():\n        content_type = sub_message.get_content_type()\n        disposition = sub_message.get('Content-Disposition')\n        if content_type == 'text/plain' and disposition is None:\n            x = unicode(sub_message.get_payload())\n            return_message.append_body(x)\n        elif content_type in _ignored_content_types and disposition is None:\n            pass # throw away contents we don't want\n        else:\n            return_message.add_attachment(sub_message.get_payload(), content_type=content_type, filename=disposition)\n    return return_message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_content(content):\n attachments = []\n body = None\n html = None\n\n for part in content.walk():\n if part.get('Content-Disposition') is not None:\n decoded_data = decode_attachment(part)\n\n attachment = parse_attachment(part)\n if attachment:\n attachments.append(attachment)\n elif part.get_content_type() == \"text/plain\":\n if body is None:\n body = \"\"\n body += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n elif part.get_content_type() == \"text/html\":\n if html is None:\n html = \"\"\n html += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n # return the parsed data\n return {\n 'body': body,\n 'html': html,\n 'filename': decoded_data['filename']\n # 'attachments': attachments\n }", "def get_message_from_request(request):\n sender = request.form['from']\n recipients = request.form['to'].split()\n subject = request.form['subject']\n body = request.form['body']\n cc = request.form.get('cc', '').split()\n bcc = request.form.get('bcc', '').split()\n attachments = parse_attachments(request)\n return Message(sender, recipients, subject, body, cc, bcc, attachments)", "def multipart_mixed():\n msg = MIMEMultipart(\"mixed\")\n msg[\"From\"] = sender\n msg[\"To\"] = recipient\n msg[\"Subject\"] = \"Multipart mixed\"\n\n part_1 = MIMEText(\"This is the first part (plaintext)\\n\", \"plain\")\n part_2 = MIMEText(\"This is the second part (HTML)\\n\", \"html\")\n part_3 = MIMEText(\"This is the third part (plaintext)\\n\", \"plain\")\n part_4 = MIMEText(\"This is the fourth part (HTML)\\n\", \"html\")\n\n msg.attach(part_1)\n msg.attach(part_2)\n msg.attach(part_3)\n msg.attach(part_4)\n\n return msg", "def get_message_content(message): # pylint: disable=too-many-return-statements\n if message.content_type == \"photo\":\n return message.photo[0].file_id\n if message.content_type == \"text\":\n return message.text\n if message.content_type == \"audio\":\n return message.audio.file_id\n if message.content_type == \"document\":\n return message.document.file_id\n if message.content_type == \"sticker\":\n return message.sticker.thumb.file_id\n if message.content_type == \"video\":\n return message.video.file_id\n if message.content_type == \"voice\":\n return message.voice.file_id\n return message.text or \"None\"", "def extract (msgfile, key):\n m = email.message_from_file(msgfile)\n From, To, Subject, Date = caption(m)\n #Text, Html, Files, Parts = pullout(m, key)\n Text = Text.strip(); Html = Html.strip()\n msg = {\"subject\": Subject, \"from\": From, \"to\": To, \"date\": Date,\n \"text\": Text, \"html\": Html, \"parts\": Parts}\n if Files: msg[\"files\"] = Files\n return msg", "def contentEmail(email_one):\n parse = email.message_from_string(email_one)\n if parse.is_multipart():\n for payload in parse.walk():\n email_three = payload.get_payload()\n try:\n email_three = email_three\n except AttributeError:\n continue\n return email_three \n else:\n email_two = parse.get_payload()\n email_two = email_two\n return email_two", "def message_without_attachments(self, context, message_payload_parts, from_who, to_whom, subject):\r\n\r\n body_of_part = None\r\n\r\n # достаем из нужной части (текст сообщения хранится под нулевым индексом) текст сообщения закодированный в\r\n # формате \"utf-8\" и \"base64\"\r\n for part in message_payload_parts:\r\n if part['partId'] == '0':\r\n body_of_part = part['body']\r\n\r\n # декодируем\r\n encoded_text = 
body_of_part['data']\r\n decodedBytes = base64.urlsafe_b64decode(encoded_text)\r\n decoded_text = str(decodedBytes, \"utf-8\") # текст сообщения сохраняем в переменную\r\n\r\n if self.SECRET_KEY in subject or self.SECRET_KEY in decoded_text:\r\n\r\n telebot_message_text = f'Sender: {from_who}.\\n' \\\r\n f'Receiver: {to_whom}.\\n' \\\r\n f'Subject: {subject}.\\n' \\\r\n f'Text of message: {decoded_text}'\r\n\r\n with open('managers.json') as obj:\r\n managers = json.load(obj)\r\n\r\n for m_chat_id in managers.values():\r\n try:\r\n context.bot.send_message(chat_id=m_chat_id, text=telebot_message_text) # отправка сообщения в бот\r\n except:\r\n pass", "async def extract_data_from_msg(msg):\n\n body = msg.get_body(('html', 'plain',))\n\n msg_out = {\n 'status': 'delivered',\n 'subject': msg['Subject'],\n 'received': datetime.datetime.now().isoformat(),\n 'from': msg['From'].addresses[0],\n 'recipients': list(msg['To'].addresses),\n 'original-to': msg['X-Original-To'],\n 'delivered-to': msg['Delivered-To'],\n 'dkim-signature': msg['DKIM-Signature'],\n 'message-id': msg['Message-ID'],\n 'domain-signature': msg['DomainKey-Signature'],\n 'date': msg['Date'].datetime,\n 'return': msg['Return-Path'] or msg['Reply-To'],\n 'in-thread': False,\n 'body-type': body.get_content_type(),\n 'body-charset': body.get_content_charset(),\n 'body': body.get_content(),\n 'attachments': []\n }\n\n for ind, att in enumerate(msg.iter_attachments()):\n msg_out['attachments'].append({\n 'index': ind,\n 'type': att.get_content_type(),\n 'filename': att.get_filename()\n })\n\n if msg['Thread-Topic']:\n msg_out['in_thread'] = True\n msg_out['thread-topic'] = msg['Thread-Topic']\n msg_out['thread-index'] = msg['Thread-index']\n\n return msg_out", "def process_mailbox(M):\n\n rv, data = M.search(None, config.email['search'])\n if rv != 'OK':\n logger.info(\"No messages found!\")\n return\n\n for num in data[0].split():\n rv, data = M.fetch(num, '(RFC822)')\n if rv != 'OK':\n logger.error(\"ERROR getting message\", num)\n return\n\n msg = email.message_from_string(data[0][1])\n\n content, extras = decode_body(msg)\n extraText = \"\"\n if extras:\n extraText = \"\\n \" + unichr(10133) +\" **\" + str(len(extras)) + \" attachments:**\"\n for (name, cont) in extras:\n extraText += \"\\n- \" + str(name)\n # remove markdown which would confuse the parser\n content = re.sub('[\\*_]', '', content)\n if len(content) > config.email['maxLen']:\n content = content[:config.email['maxLen']] + \"... 
_trimmed_\"\n subject, encoding = email.Header.decode_header(msg['Subject'])[0]\n emailText = \"*From:* \" + msg['From'] + \"\\n*Subject:* \" + subject + \"\\n==========\\n\" + content + \" \" + extraText\n\n send_message(emailText)", "def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg", "def multipart_nested():\n msg = MIMEMultipart(\"mixed\")\n msg[\"From\"] = sender\n msg[\"To\"] = recipient\n msg[\"Subject\"] = \"Nested multipart email\"\n\n part_1 = MIMEMultipart(\"alternative\")\n part_1_text = MIMEText(\"This is the **first** part\\n\", \"plain\")\n part_1_html = MIMEText(\"This is the <strong>first</strong> part\\n\", \"html\")\n part_1.attach(part_1_text)\n part_1.attach(part_1_html)\n\n part_2 = MIMEText(\"This is the second part\\n\", \"plain\")\n\n msg.attach(part_1)\n msg.attach(part_2)\n\n return msg", "def parse_attachments(request):\n attachments = []\n for attachment in request.files.getlist('attachment'):\n attachments.append(Attachment(attachment.filename, attachment))\n return attachments", "def textparts(msg):\n return set(filter(lambda part: part.get_content_maintype() == 'text',\n msg.walk()))", "def get_text_from_email(msg):\n parts = []\n for part in msg.walk():\n if part.get_content_type() == 'text/plain':\n parts.append(part.get_payload())\n return ''.join(parts)", "def __init__(self, content):\n\t\tself.raw = content\n\t\tself.mail = email.message_from_string(self.raw)\n\n\t\tself.text_content = ''\n\t\tself.html_content = ''\n\t\tself.attachments = []\n\t\tself.cid_map = {}\n\t\tself.parse()\n\t\tself.set_content_and_type()\n\t\tself.set_subject()\n\t\tself.set_from()\n\t\tself.message_id = self.mail.get('Message-ID')\n\n\n\t\tself.unique_id = get_unique_id(self.mail)\n\n\t\t# gmail mailing-list compatibility\n\t\t# use X-Original-Sender if available, as gmail sometimes modifies the 'From'\n\t\t# _from_email = self.mail.get(\"X-Original-From\") or self.mail[\"From\"]\n\t\t# \n\t\t# self.from_email = extract_email_id(_from_email)\n\t\t# if self.from_email:\n\t\t# \tself.from_email = self.from_email.lower()\n\t\t# \n\t\t# #self.from_real_name = email.utils.parseaddr(_from_email)[0]\n\t\t# \n\t\t# _from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])\n\t\t# self.from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])[0][0] or \"\"\n\t\t# \n\t\t# try:\n\t\t# \tif _from_real_name[0][1]:\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(_from_real_name[0][1])\n\t\t# \telse:\n\t\t# \t\t# assume that the encoding is utf-8\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(\"utf-8\")\n\t\t# except UnicodeDecodeError,e:\n\t\t# \tprint e\n\t\t# \tpass\n\n\t\t#self.from_real_name = email.Header.decode_header(email.utils.parseaddr(_from_email)[0])[0][0]\n\t\tself.To = self.mail.get(\"To\")\n\t\tif self.To:\n\t\t\tto = u\"\"\n\t\t\tfor name, encoding in decode_header(self.To):\n\t\t\t\tif encoding:\n\t\t\t\t\tto += name.decode(encoding)\n\t\t\t\telse:\n\t\t\t\t\tto += name\n\t\t\tself.To = to.lower()\n\t\tself.CC = self.mail.get(\"CC\")\n\t\tif self.CC:\n\t\t\tself.CC = self.CC.lower()\n\t\tif self.mail[\"Date\"]:\n\t\t\ttry:\n\t\t\t\tutc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail[\"Date\"]))\n\t\t\t\tutc_dt = datetime.datetime.utcfromtimestamp(utc)\n\t\t\t\tself.date = 
convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\texcept:\n\t\t\t\tself.date = now()\n\t\telse:\n\t\t\tself.date = now()\n\t\tif self.date > now():\n\t\t\tself.date = now()", "def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])", "def test_fetchSimplifiedBodyMultipart(self):\n self.function = self.client.fetchSimplifiedBody\n self.messages = '21'\n\n # A couple non-multipart messages to use as the inner-most payload\n singles = [\n FakeyMessage(\n {'content-type': 'text/plain'},\n (), b'date', b'Stuff', 54321, None),\n FakeyMessage(\n {'content-type': 'text/html'},\n (), b'date', b'Things', 32415, None)]\n\n # A multipart/alternative message containing the above non-multipart\n # messages. This will be the payload of the outer-most message.\n alternative = FakeyMessage(\n {'content-type': 'multipart/alternative'},\n (), b'', b'Irrelevant', 12345, singles)\n\n # The outer-most message, also with a multipart type, containing just\n # the single middle message.\n mixed = FakeyMessage(\n # The message is multipart/mixed\n {'content-type': 'multipart/mixed'},\n (), b'', b'RootOf', 98765, [alternative])\n\n self.msgObjs = [mixed]\n\n self.expected = {\n 0: {'BODY': [\n [['text', 'plain', None, None, None, None, '5', '1'],\n ['text', 'html', None, None, None, None, '6', '1'],\n 'alternative'],\n 'mixed']}}\n\n return self._fetchWork(False)", "def get_message(service, user_id, msg_id):\n try:\n # grab the message instance\n message = service.users().messages().get(userId=user_id, id=msg_id,format='raw').execute()\n\n # decode the raw string, ASCII works pretty well here\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n\n # grab the string from the byte object\n mime_msg = email.message_from_bytes(msg_str)\n\n # check if the content is multipart (it usually is)\n content_type = mime_msg.get_content_maintype()\n if content_type == 'multipart':\n # there will usually be 2 parts the first will be the body in text\n # the second will be the text in html\n parts = mime_msg.get_payload()\n\n # return the encoded text\n final_content = parts[0].get_payload()\n #return final_content\n return final_content\n\n elif content_type == 'text':\n return mime_msg.get_payload()\n #return mime_msg.get_payload()\n\n else:\n return \"\"\n print(\"\\nMessage is not text or multipart, returned an empty string\")\n # unsure why the usual exception doesn't work in this case, but \n # having a standard Exception seems to do the trick\n except Exception as error:\n print(\"An error occured: {}\".format(error))", "def 
get_first_text_block(email_message_instance):\n maintype = email_message_instance.get_content_maintype()\n if maintype == 'multipart':\n for part in email_message_instance.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif maintype == 'text':\n return email_message_instance.get_payload()", "def _copy_message(self, message):\r\n gmsg = aeemail.EmailMessage(sender=message.from_email,\r\n to=message.to,\r\n subject=message.subject,\r\n body=message.body)\r\n if message.extra_headers.get('Reply-To', None):\r\n gmsg.reply_to = message.extra_headers['Reply-To']\r\n if message.cc:\r\n gmsg.cc = list(message.cc)\r\n if message.bcc:\r\n gmsg.bcc = list(message.bcc)\r\n if message.attachments:\r\n # Must be populated with (filename, filecontents) tuples.\r\n attachments = []\r\n for attachment in message.attachments:\r\n if isinstance(attachment, MIMEBase):\r\n attachments.append((attachment.get_filename(),\r\n attachment.get_payload(decode=True)))\r\n else:\r\n attachments.append((attachment[0], attachment[1]))\r\n gmsg.attachments = attachments\r\n # Look for HTML alternative content.\r\n if isinstance(message, EmailMultiAlternatives):\r\n for content, mimetype in message.alternatives:\r\n if mimetype == 'text/html':\r\n gmsg.html = content\r\n break\r\n return gmsg", "def get_decoded_email_body(message_body):\n\n msg = email.message_from_string(message_body)\n\n text = \"\"\n if msg.is_multipart():\n html = None\n for part in msg.walk():\n\n # print \"%s, %s\" % (part.get_content_type(), part.get_content_charset())\n\n if part.get_content_charset() is None:\n # We cannot know the character set, so return decoded \"something\"\n text = part.get_payload(decode=True)\n continue\n\n charset = part.get_content_charset()\n\n if part.get_content_type() == 'text/plain':\n text = unicode(part.get_payload(decode=True), str(charset), \"ignore\").encode('utf8', 'replace')\n\n if part.get_content_type() == 'text/html':\n html = unicode(part.get_payload(decode=True), str(charset), \"ignore\").encode('utf8', 'replace')\n\n else:\n continue\n\n if text is not None:\n return text.strip()\n else:\n return html.strip()\n else:\n text = unicode(msg.get_payload(decode=True), msg.get_content_charset(), 'ignore').encode('utf8', 'replace')\n return text.strip()", "def get_msg_by_content(self, content):\n msg_data = self.database.search(self.tname, MsgWithTag.get_msg_key(), content)\n if len(msg_data) != 0:\n return (msg_data[0][0], self.data_to_msg(msg_data[0]))\n return None", "def send_messages(self, email_messages):\n from post_office.mail import create\n from post_office.utils import create_attachments\n\n if not email_messages:\n return\n\n for email_message in email_messages:\n subject = email_message.subject\n from_email = email_message.from_email\n headers = email_message.extra_headers\n message = email_message.message()\n\n # Look for first 'text/plain' and 'text/html' alternative in email\n plaintext_body = html_body = ''\n for part in message.walk():\n if part.get_content_type() == 'text/plain':\n plaintext_body = part.get_payload()\n if html_body:\n break\n if part.get_content_type() == 'text/html':\n html_body = part.get_payload()\n if plaintext_body:\n break\n\n attachment_files = {}\n for attachment in email_message.attachments:\n if isinstance(attachment, MIMEBase):\n attachment_files[attachment.get_filename()] = {\n 'file': ContentFile(attachment.get_payload()),\n 'mimetype': attachment.get_content_type(),\n 'headers': OrderedDict(attachment.items()),\n }\n 
else:\n attachment_files[attachment[0]] = ContentFile(attachment[1])\n recipients = filter_blacklisted_recipients(email_message.to)\n cc = filter_blacklisted_recipients(email_message.cc)\n bcc = filter_blacklisted_recipients(email_message.bcc)\n if not len(recipients + cc + bcc):\n continue\n email = create(sender=from_email,\n recipients=recipients,\n cc=cc,\n bcc=bcc,\n subject=subject,\n message=plaintext_body,\n html_message=html_body,\n headers=headers)\n\n if attachment_files:\n attachments = create_attachments(attachment_files)\n\n email.attachments.add(*attachments)\n\n if get_default_priority() == 'now':\n email.dispatch()", "def media(self, request, *args, **kwargs):\n conversation = self.get_object()\n media_attachments = conversation.media_attachments\n self.pagination_class = ShoutitPageNumberPagination\n page = self.paginate_queryset(media_attachments)\n # Todo: Only keep the message attachments that were not deleted by this user\n serializer = MessageAttachmentSerializer(page, many=True, context={'request': request})\n return self.get_paginated_response(serializer.data)", "def get_inbox(character):\n messages = get_messages(character)\n return [ Mail(message) for message in messages ]", "def message_parser(msg):\n # Start a new message\n new_msg = {\n \"messageType\": msg[\"messageType\"],\n \"messageID\": msg[\"messageID\"],\n \"messageURL\": msg[\"messageURL\"],\n \"messageIssueTime\": msg[\"messageIssueTime\"],\n 'messageBody': {}\n }\n # Break down the incoming message's messageBody and save to new message\n sections = msg[\"messageBody\"].split(\"\\n## \")\n for part in sections:\n try:\n header, body = part.split(\":\", 1) # only split on first occurrence of colon, not all occurrences (ie dates)\n header = header.strip(\"##\").replace(\" \", \"_\").lower() # clean up headers\n body = body.lstrip(\" \").replace(\"\\n\", \" \").replace(\"#\", \"\")\n if header:\n new_msg[\"messageBody\"][header] = body\n except ValueError:\n continue\n # Break down notes if present and save to new message\n if \"notes\" in new_msg[\"messageBody\"] and new_msg[\"messageBody\"][\"notes\"]:\n try:\n notes_wo_dsc = new_msg[\"messageBody\"][\"notes\"].split(\"Disclaimer\")[0] # First set the important stuff to a var\n new_msg[\"messageBody\"][\"notes\"] = {} # now turn notes into an object\n parent_header, children = notes_wo_dsc.split(\":\", 1)\n parent_header = parent_header.lstrip(\" \")\n new_msg[\"messageBody\"][\"notes\"][parent_header] = {} # make a new object for more children\n child_parts = children.split(\" \")\n child_header = None\n new_body = \"\"\n for part in child_parts:\n if part.endswith(\":\"):\n child_header = part.strip(\":\")\n else:\n new_body += part + \" \"\n if child_header:\n new_msg[\"messageBody\"][\"notes\"][parent_header][child_header] = new_body\n except ValueError:\n pass\n # We don't need the disclaimers taking up memory\n if \"disclaimer\" in new_msg[\"messageBody\"]:\n del new_msg[\"messageBody\"][\"disclaimer\"]\n return new_msg", "def process_message(message):\r\n message = gensim.utils.to_unicode(message, 'latin1').strip()\r\n blocks = message.split(u'\\n\\n')\r\n # skip email headers (first block) and footer (last block)\r\n content = u'\\n\\n'.join(blocks[1:])\r\n return content", "def extract_messages(self,msg_list):\n msgs = []\n for m in msg_list:\n msgs.append(json.loads(str(m)))\n return msgs", "def _process_incoming_mail(raw_message, recipients):\n recipients = [x[1] for x in email.utils.getaddresses([recipients])]\n\n incoming_msg = 
mail.InboundEmailMessage(raw_message)\n\n if 'X-Google-Appengine-App-Id' in incoming_msg.original:\n raise InvalidIncomingEmailError('Mail sent by App Engine')\n\n # Use the subject to find the issue number.\n # Originally the tag was (issueNNN).\n # Then we changed it to be (issue NNN by WHO).\n # We want to match either of these, and we need to deal with\n # the fact that some mail readers will fold the long subject,\n # turning a single space into \"\\r\\n \".\n # We use \"issue\\s*\" to handle all these forms,\n # and we omit the closing ) to accept both the original and the \"by WHO\" form.\n subject = incoming_msg.subject or ''\n match = re.search(r'\\(issue\\s*(?P<id>\\d+)', subject)\n if match is None:\n raise InvalidIncomingEmailError('No issue id found: %s', subject)\n issue_id = int(match.groupdict()['id'])\n issue = models.Issue.get_by_id(issue_id)\n if issue is None:\n raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)\n sender = email.utils.parseaddr(incoming_msg.sender)[1]\n\n body = None\n for _, payload in incoming_msg.bodies('text/plain'):\n # FIXME(andi): Remove this when issue 2383 is fixed.\n # 8bit encoding results in UnknownEncodingError, see\n # http://code.google.com/p/googleappengine/issues/detail?id=2383\n # As a workaround we try to decode the payload ourselves.\n if payload.encoding == '8bit' and payload.charset:\n body = payload.payload.decode(payload.charset)\n # If neither encoding not charset is set, but payload contains\n # non-ASCII chars we can't use payload.decode() because it returns\n # payload.payload unmodified. The later type cast to db.Text fails\n # with a UnicodeDecodeError then.\n elif payload.encoding is None and payload.charset is None:\n # assume utf-8 but set replace flag to go for sure.\n body = payload.payload.decode('utf-8', 'replace')\n else:\n body = payload.decode()\n break\n if body is None or not body.strip():\n raise InvalidIncomingEmailError('Ignoring empty message.')\n elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:\n # see issue325, truncate huge bodies\n trunc_msg = '... (message truncated)'\n end = django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)\n body = body[:end]\n body += trunc_msg\n\n # If the subject is long, this might come wrapped into more than one line.\n subject = ' '.join([x.strip() for x in subject.splitlines()])\n msg = models.Message(issue_key=issue.key, parent=issue.key,\n subject=subject,\n sender=sender,\n recipients=[x for x in recipients],\n date=datetime.datetime.now(),\n text=body,\n draft=False)\n\n # Add sender to reviewers if needed.\n all_emails = [str(x).lower()\n for x in ([issue.owner.email()] +\n issue.reviewers +\n issue.cc +\n issue.collaborator_emails())]\n if sender.lower() not in all_emails:\n query = models.Account.query(models.Account.lower_email == sender.lower())\n account = query.get()\n if account is not None:\n issue.reviewers.append(account.email) # e.g. 
account.email is CamelCase\n else:\n issue.reviewers.append(db.Email(sender))\n\n issue.calculate_updates_for(msg)\n issue.put()\n msg.put()", "def create_multipart_message(\n sender: str, recipients: list, title: str, text: str = None, html_text: str = None, attachments: list = None) \\\n -> MIMEMultipart:\n multipart_content_subtype = 'alternative' if text and html_text else 'mixed'\n msg = MIMEMultipart(multipart_content_subtype)\n msg['Subject'] = title\n msg['From'] = sender\n msg['To'] = ', '.join(recipients)\n\n # Record the MIME types of both parts - text/plain and text/html.\n # According to RFC 2046, the last part of a multipart message, in this case the HTML message, is best and preferred.\n if text:\n part = MIMEText(text, 'plain')\n msg.attach(part)\n if html_text:\n part = MIMEText(html_text, 'html')\n msg.attach(part)\n\n # Add attachments\n for attachment in attachments or []:\n with open(attachment, 'rb') as f:\n part = MIMEApplication(f.read())\n part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachment))\n msg.attach(part)\n\n return msg", "def messages(self, request, *args, **kwargs):\n conversation = self.get_object()\n related = ['user__profile']\n related2 = ['attachments']\n messages_qs = conversation.messages.all().select_related(*related).prefetch_related(*related2)\n self.pagination_class = DateTimePagination\n page = self.paginate_queryset(messages_qs)\n\n # Only keep the messages that were not deleted by this user\n messages_ids = map(lambda m: m.id, page.object_list)\n deleted_messages_ids = request.user.deleted_messages.filter(id__in=messages_ids).values_list('id', flat=True)\n for message in page.object_list:\n if message.id in deleted_messages_ids:\n page.object_list.remove(message)\n\n serializer = MessageSerializer(page, many=True, context={'request': request})\n conversation.mark_as_read(request.user)\n return self.get_paginated_response(serializer.data)", "def message_with_attachments(self, session, mid, context, zero_part, message_payload_parts,\r\n from_who, to_whom, subject):\r\n\r\n zero_part_parts = zero_part['parts']\r\n sub_zero_part = zero_part_parts[0]\r\n body_of_part = sub_zero_part['body']\r\n\r\n # декодируем\r\n encoded_text = body_of_part['data']\r\n decodedBytes = base64.urlsafe_b64decode(encoded_text)\r\n decoded_text = str(decodedBytes, \"utf-8\") # текст сообщения сохраняем в переменную\r\n\r\n if self.SECRET_KEY in subject or self.SECRET_KEY in decoded_text:\r\n\r\n telebot_message_text = f'Sender: {from_who}.\\n' \\\r\n f'Receiver: {to_whom}.\\n' \\\r\n f'Subject: {subject}.\\n' \\\r\n f'Text of message: {decoded_text}'\r\n\r\n with open('managers.json') as obj:\r\n managers = json.load(obj)\r\n\r\n for m_chat_id in managers.values():\r\n try:\r\n context.bot.send_message(chat_id=m_chat_id, text=telebot_message_text) # отправка сообщения в бот\r\n except:\r\n pass\r\n\r\n self.get_and_send_attachments(session, mid, message_payload_parts, context, m_chat_id)", "def GetMimeMessage(service, user_id, msg_id, idx):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,\n format='raw').execute()\n\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mail = mailparser.parse_from_bytes(msg_str)\n\n msg_str = str(mail.text_plain)\n msg_str = msg_str.strip(\"\")\n msg_str = clean_text(msg_str)\n msg_str = preprocess(msg_str)\n\n #print(msg_str)\n\n except errors.HttpError:\n print('An error occurred:')\n\n try:\n met = service.users().messages().get(userId=user_id, 
id=msg_id, format='metadata').execute()\n\n pay = met['payload']\n head = pay['headers']\n sub=\"\"\n for h in head:\n if (h['name'] == 'Subject'):\n sub = \"Subject: \"+str(h['value'])\n except errors.HttpError:\n print('An error occurred:')\n filename = \"./ham/email\"\n file_extension = \".txt\"\n new_fname = \"{}-{}{}\".format(filename, idx, file_extension)\n #print(new_fname)\n f= open(new_fname,\"w+\")\n f.write(sub+\"\\n\")\n f.write(msg_str)\n f.close()", "def get_message(self, **kwargs):\n message = Mail()\n if \"from_email\" in kwargs:\n sender = Email()\n message_content = kwargs.get(\"message_content\", \"\")\n sender.name = message_content.get(\"sender\", emailconf.DEFAULT_SENDER)\n sender.email = kwargs.get(\"from_email\", emailconf.DEFAULT_SENDER_EMAIL)\n message.from_email = sender\n if \"subject\" in kwargs:\n message.subject = kwargs.get(\"subject\", \"\")\n if \"text\" in kwargs:\n content = Content(\"text/plain\", kwargs.get(\"text\", \"\"))\n message.add_content(content)\n if \"html\" in kwargs:\n content = Content(\"text/html\", kwargs.get(\"html\", \"\"))\n message.add_content(content)\n if \"category\" in kwargs:\n category = Category(kwargs.get(\"category\", \"\"))\n message.add_category(category)\n\n personalization = self.create_personalization(**kwargs)\n if personalization:\n message.add_personalization(personalization)\n\n return message.get()", "def process_message(message):\n return {\n \"subject\": message.subject,\n \"sender\": message.sender_name,\n \"header\": message.transport_headers,\n \"body\": message.plain_text_body,\n \"creation_time\": message.creation_time,\n \"submit_time\": message.client_submit_time,\n \"delivery_time\": message.delivery_time,\n \"attachment_count\": message.number_of_attachments,\n }", "def octetparts(msg):\n return set(filter(lambda part:\n part.get_content_type() == 'application/octet-stream',\n msg.walk()))", "def handleMsg(mailbox, msg, is_subpart=False, strdate=\"\"):\r\n global text\r\n global attachments\r\n global fieldFrom, fieldSubject, fieldTime\r\n\r\n # Message/RFC822 parts are bundled this way ==============\r\n while isinstance(msg.get_payload(),email.Message.Message):\r\n msg=msg.get_payload()\r\n\r\n if not is_subpart:\r\n fieldFrom = \"\"\r\n fieldSubject = \"\"\r\n fieldTime = None # fieldTime is a 9-item tuple\r\n text = \"\" # the text contents of a message\r\n attachments = \"\"\r\n\r\n ## Set the \"From\" Field ==================================\r\n if fieldFrom == \"\" and msg['From'] != None:\r\n text += \"To: %s\\n\" % decode_field(msg['To'])\r\n if msg['Cc'] != None:\r\n text += \"Cc: %s\\n\" % decode_field(msg['Cc'])\r\n if msg['Bcc'] != None:\r\n text += \"Bcc: %s\\n\" % decode_field(msg['Bcc'])\r\n text += \"From: %s\\n\" % decode_field(msg['From'])\r\n fieldFrom = decode_field(msg['From'])\r\n\r\n ## Set the \"Subject\" Field ===============================\r\n if fieldSubject == \"\" and msg['Subject'] != None:\r\n fieldSubject = decode_field(msg['Subject'])\r\n text += \"Subject: %s\\n\" % fieldSubject\r\n\r\n ## Set the \"Date\" Field ==================================\r\n if fieldTime == None and msg['Date'] != None:\r\n fieldTime = string2time(msg['Date'])\r\n strdate = time.strftime(\"%Y%m%d%H%M\", fieldTime)\r\n\r\n ## Handle multipart messages recursively =================\r\n if msg.is_multipart():\r\n for submsg in msg.get_payload():\r\n handleMsg(mailbox, submsg, True, strdate)\r\n else:\r\n fname = msg.get_filename()\r\n if fname == None:\r\n if msg.get_content_type() == 
'text/plain':\r\n text += \"\\n%s\" % msg.get_payload(decode=1)\r\n else:\r\n fname = \"message.htm\"\r\n\r\n ## Save an attachment to a file ========================\r\n if not fname == None:\r\n fname = decode_field(fname)\r\n filename = \"%s\\\\att_%s\\\\%s_%s\" % (mailboxdir, mailbox, strdate, fname)\r\n org_filename = filename\r\n i = 1\r\n while os.path.exists(filename):\r\n path, ext = os.path.splitext(org_filename)\r\n filename = \"%s (%d)%s\" % (path, i, ext)\r\n i = i + 1\r\n\r\n print \" Found part: %s\" % filename # for debugging purposes\r\n attachments += \"%s\\n\" % filename\r\n fd = open (filename, \"wb\")\r\n data = msg.get_payload(decode=1)\r\n fd.write(data)\r\n\r\n # convert an html message to text\r\n if fname == \"message.htm\":\r\n try:\r\n strio = cStringIO.StringIO()\r\n html2text.html2text_file(data, out=strio.write)\r\n text += strio.getvalue()\r\n strio.close()\r\n except sgmllib.SGMLParseError, e:\r\n print e\r\n\r\n fd.close()\r\n\r\n # if this is the toplevel message (the first function that was called by\r\n # fetch_mailbox, then return the title of the message\r\n if not is_subpart and fieldTime != None:\r\n title = buildTitle(fieldTime, fieldFrom, fieldSubject)\r\n return title", "def get_cleaned_fully_merged_messages(strip_html_content=True,\n resolve_fb_id=False):\n if not _messages_file:\n print(\"Please initialize the facebook_connector module.\")\n return\n chats = None\n with io.open(_messages_file, mode=\"rt\", encoding=\"utf-8\") as handle:\n chats = parser.parse(handle=handle)\n me = chats.user\n addresses = set()\n messages = []\n # Suppressing warning that BS4 will display\n # when a message only contains a URL\n warnings.filterwarnings(\"ignore\", category=UserWarning, module='bs4')\n try:\n threads = chats.threads.itervalues()\n except AttributeError:\n threads = chats.threads.values()\n for thread in threads:\n # This set holds the list of participants after their identifier\n # has been resolved to their name (see resolve_user_id)\n resolved_participants = set()\n for participant in thread.participants:\n if participant is not None and not participant.isspace():\n resolved_participant = resolve_user_id(\n participant) if resolve_fb_id else participant\n resolved_participants.add(resolved_participant)\n addresses.update(resolved_participants)\n for message in thread.messages:\n if not message.content or message.content.isspace():\n continue\n sender = resolve_user_id(\n message.sender) if resolve_fb_id else message.sender\n from_me = sender == me\n if strip_html_content:\n content = BeautifulSoup(message.content, \"html.parser\").text\n else:\n content = message.content\n # In the following we add a single message to our dataframe\n if from_me:\n # If the user is sending a message to a group,\n # then we need to add one message\n # per group participant to the dataframe\n for participant in resolved_participants:\n messages.append({\n 'text': content,\n 'date': message.timestamp,\n 'is_from_me': from_me,\n 'full_name': participant\n })\n else:\n messages.append({\n 'text': content,\n 'date': message.timestamp,\n 'is_from_me': from_me,\n 'full_name': sender\n })\n address_book_df = pd.DataFrame(data=list(addresses), columns=[\"full_name\"])\n messages_df = pd.DataFrame.from_records(messages)\n return messages_df, address_book_df", "def process_mailbox(M):\n rv, data = M.search(None, \"ALL\")\n if rv != 'OK':\n print \"No messages found!\"\n return\n\n ids = data[0]\n id_list = ids.split()\n for num in id_list:\n rv, data = M.fetch(num, 
'(RFC822)')\n if rv != 'OK':\n print \"ERROR getting message\", num\n return\n\n msg = email.message_from_string(data[0][1])\n decode = email.header.decode_header(msg['Subject'])[0]\n subject = unicode(decode[0])\n print 'Message %s: %s' % (num, subject)\n print 'Raw Date:', msg['Date']\n # Now convert to local date-time\n date_tuple = email.utils.parsedate_tz(msg['Date'])\n if date_tuple:\n local_date = datetime.datetime.fromtimestamp(\n email.utils.mktime_tz(date_tuple))\n print \"Local Date:\", \\\n local_date.strftime(\"%a, %d %b %Y %H:%M:%S\")", "def action_create_mail_messages(self):\n self.check_recipients()\n self.check_message()\n messages = self.env['mail.message']\n for recipient in self.recipient_ids:\n messages |= recipient._create_mail_message()\n return messages", "def test_multiPartExtended(self):\n oneSubPart = FakeyMessage({\n b'content-type': b'image/jpeg; x=y',\n b'content-id': b'some kind of id',\n b'content-description': b'great justice',\n b'content-transfer-encoding': b'maximum',\n }, (), b'', b'hello world', 123, None)\n\n anotherSubPart = FakeyMessage({\n b'content-type': b'text/plain; charset=us-ascii',\n }, (), b'', b'some stuff', 321, None)\n\n container = FakeyMessage({\n 'content-type': 'multipart/related; foo=bar',\n 'content-language': 'es',\n 'content-location': 'Spain',\n 'content-disposition': 'attachment; name=monkeys',\n }, (), b'', b'', 555, [oneSubPart, anotherSubPart])\n\n self.assertEqual(\n [imap4.getBodyStructure(oneSubPart, extended=True),\n imap4.getBodyStructure(anotherSubPart, extended=True),\n 'related', ['foo', 'bar'], ['attachment', ['name', 'monkeys']],\n 'es', 'Spain'],\n imap4.getBodyStructure(container, extended=True))", "def mbox_reader(stream):\n data = stream.read()\n text = data.decode(encoding=\"utf-8\", errors=\"replace\")\n return mailbox.mboxMessage(text)", "def GetMimeMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,\n format='raw').execute()\n\n #print('Message snippet: %s' % message['snippet'])\n \n\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n\n \n\n mime_msg = email.message_from_string(msg_str)\n\n return mime_msg\n \n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def _build_standard_payload(self, message):\n recipients_list = [\n sanitize_address(addr, message.encoding)\n for addr in message.recipients()]\n recipients = [\n {\"email\": e, \"name\": n}\n for n, e in [parseaddr(r) for r in recipients_list]]\n\n sender = sanitize_address(message.from_email, message.encoding)\n name, email = parseaddr(sender)\n\n payload = {\n 'key': self.api_key,\n 'message': {\n 'text': message.body,\n 'subject': message.subject,\n 'from_email': email,\n 'from_name': getattr(message, 'from_name', None) or name,\n 'to': recipients,\n },\n }\n\n if message.attachments:\n payload['message']['attachments'] = []\n for attachment in message.attachments:\n # django supports two types of attachements:\n # * a subclass of email.mime.base.MIMEBase\n # * a tuple of (filename, content[, mimetype])\n if isinstance(attachment, MIMEBase):\n filename = attachment.get_filename()\n content = attachment.get_payload(decode=True)\n mimetype = attachment.get_content_type()\n else:\n filename = attachment[0]\n content = attachment[1]\n mimetype = (\n attachment[2]\n if len(attachment) > 2 and attachment[2]\n else mimetypes.guess_type(filename)[0]\n )\n payload['message']['attachments'].append({\n 'type': mimetype,\n 'name': str(filename),\n 
'content': base64.b64encode(content),\n })\n return payload", "def getMessage(self, msg_id: str) -> str:\n message = self.service.users().messages().get(userId='me', id=msg_id, format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n message_main_type = mime_msg.get_content_maintype()\n \n if message_main_type == 'multipart':\n for part in mime_msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif message_main_type == 'text':\n return mime_msg.get_payload()", "def extract(request):\n try:\n files = request.FILES.getlist('myFile')\n msg_data = []\n fs = FileSystemStorage()\n for file in files:\n name = file.name.replace(\" \", \"_\")\n if os.path.exists(settings.MEDIA_ROOT + \"\\\\\" + name):\n os.remove(settings.MEDIA_ROOT + \"\\\\\" + name)\n fs.save(settings.MEDIA_ROOT + \"\\\\\" + name, file)\n msg = extract_msg.Message(settings.MEDIA_ROOT + \"\\\\\" + name)\n msg.save_attachments(customPath=settings.MEDIA_ROOT + \"\\\\\")\n attachments = []\n for i in range(0, len(msg.attachments)):\n attachments.append({\n \"filename\": msg.attachments[i].shortFilename,\n \"filepath\": \"/media/\" + msg.attachments[i].shortFilename\n })\n msg_data.append({\n # \"mainProperties\": msg.mainProperties,\n # \"header\": msg.header,\n \"attachments\": attachments,\n \"filename\": file.name,\n \"filepath\": \"/media/\" + name,\n \"from\": msg.sender,\n \"to\": msg.to,\n \"cc\": msg.cc,\n \"subject\": msg.subject,\n \"date\": msg.date,\n \"body\": msg.body,\n })\n msg.close()\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"File Uploaded!\",\n \"data\": msg_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Erorr in file uploading!\",\n \"data\": msg_data\n }\n return Response(response)", "def _create_mime_attachment(self, content, mimetype):\n basetype, subtype = mimetype.split('/', 1)\n if basetype == 'text':\n encoding = self.encoding or getattr(settings, \"EMAIL_CHARSET\",\n settings.DEFAULT_CHARSET)\n attachment = SafeMIMEText(content, subtype, encoding)\n elif basetype == 'message' and subtype == 'rfc822':\n # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments\n # must not be base64 encoded.\n if isinstance(content, EmailMessage):\n # convert content into an email.Message first\n content = content.message()\n elif not isinstance(content, Message):\n # For compatibility with existing code, parse the message\n # into an email.Message object if it is not one already.\n content = message_from_string(content)\n\n attachment = SafeMIMEMessage(content, subtype)\n else:\n # Encode non-text attachments with base64.\n attachment = MIMEBase(basetype, subtype)\n attachment.set_payload(content)\n encoders.encode_base64(attachment)\n return attachment", "def test_multiPart(self):\n oneSubPart = FakeyMessage({\n 'content-type': 'image/jpeg; x=y',\n 'content-id': 'some kind of id',\n 'content-description': 'great justice',\n 'content-transfer-encoding': 'maximum',\n }, (), b'', b'hello world', 123, None)\n\n anotherSubPart = FakeyMessage({\n 'content-type': 'text/plain; charset=us-ascii',\n }, (), b'', b'some stuff', 321, None)\n\n container = FakeyMessage({\n 'content-type': 'multipart/related',\n }, (), b'', b'', 555, [oneSubPart, anotherSubPart])\n\n self.assertEqual(\n [imap4.getBodyStructure(oneSubPart),\n imap4.getBodyStructure(anotherSubPart),\n 'related'],\n imap4.getBodyStructure(container))", "def get_message(self):\n context = 
self.context\n\n charset = str(context.charset)\n contentType = context.content_type\n\n mail_body = context.render()\n maintype, subtype = contentType.split('/')\n\n return MIMEText(mail_body, subtype, charset)", "def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)", "def _get_message(self, sender_message):\n # type: (str) -> Message or None\n st_re = self.SENDER_TEXT.search(sender_message)\n if st_re is None:\n return None\n else:\n return Message(speaker=st_re.group(1), text=st_re.group(2).strip())", "def export_messages_from_file(\n src_file: Path, msg_ids: Iterable[int], dest_folder: Path = None\n) -> None:\n\n dest_folder = (dest_folder or Path.cwd()) / src_file.stem\n dest_folder.mkdir(parents=True, exist_ok=True)\n\n with open_mail_archive(src_file) as archive:\n for msg_id in msg_ids:\n try:\n # Get message from archive\n msg = archive.get_message_by_id(int(msg_id))\n\n # Process PST or MBOX message and attachments\n if isinstance(archive, MboxArchive):\n # Extract attachments\n attachments = [\n AttachmentMetadata(\n name=part.get_filename(),\n content=part.get_payload(decode=True),\n )\n for part in msg.walk()\n if (\n content_disposition := part.get_content_disposition() or \"\"\n ).startswith(\"attachment\")\n or content_disposition.startswith(\"inline\")\n 
]\n\n if attachments:\n # Make directory for this message's attachments\n attachments_folder = dest_folder / f\"{msg_id}_attachments\"\n attachments_folder.mkdir(parents=True, exist_ok=True)\n\n # Write files\n for attachment in attachments:\n (attachments_folder / attachment.name).write_bytes(\n attachment.content\n )\n\n else: # PST archive\n if msg.number_of_attachments > 0:\n # Make directory for this message's attachments\n attachments_folder = dest_folder / f\"{msg_id}_attachments\"\n attachments_folder.mkdir(parents=True, exist_ok=True)\n\n # Extract attachments and write files\n for attachment in msg.attachments:\n buffer = attachment.read_buffer(attachment.size)\n (attachments_folder / attachment.name).write_bytes(buffer)\n\n # Convert message to Python Message type\n msg = Parser(policy=policy.default).parsestr(pff_msg_to_string(msg))\n\n # Write message as eml file\n with (dest_folder / f\"{msg_id}.eml\").open(\n mode=\"w\", encoding=\"utf-8\", errors=\"replace\"\n ) as eml_file:\n Generator(eml_file).flatten(msg)\n\n except Exception as exc:\n logger.warning(\n f\"Skipping message {msg_id} from {src_file}, reason: {exc}\",\n exc_info=True,\n )", "def extract_body(message_dict):\n tagged_parts_list = message_dict[\"structured_text\"][\"text\"]\n body = \"\"\n for part_tag_dict in tagged_parts_list:\n part = part_tag_dict[\"part\"]\n tag = part_tag_dict[\"tags\"]\n if tag == \"BODY\":\n body += part + \" \"\n elif tag == \"GREETINGS\":\n break\n\n return body", "def payload_parse(self, mail):\n\t\tif mail.is_multipart():\n\t\t\tfor payload in mail.get_payload():\n\t\t\t\tif payload.get_content_maintype() == \"multipart\":\n\t\t\t\t\tself.payload_parse(payload)\n\t\t\t\telse:\n\t\t\t\t\tself.payload_handle(payload, mail)\n\t\t\t# Post deletion of payloads:\n\t\t\tself.payload_delete(mail)", "def createMessageWithAttachment(sender, to, subject, msgHtml, msgPlain, attachmentFile):\r\n message = MIMEMultipart('mixed')\r\n message['to'] = to\r\n message['from'] = sender\r\n message['subject'] = subject\r\n\r\n messageA = MIMEMultipart('alternative')\r\n messageR = MIMEMultipart('related')\r\n\r\n messageR.attach(MIMEText(msgHtml, 'html'))\r\n messageA.attach(MIMEText(msgPlain, 'plain'))\r\n messageA.attach(messageR)\r\n\r\n message.attach(messageA)\r\n\r\n print(\"create_message_with_attachment: file: %s\" % attachmentFile)\r\n content_type, encoding = mimetypes.guess_type(attachmentFile)\r\n\r\n if content_type is None or encoding is not None:\r\n content_type = 'application/octet-stream'\r\n main_type, sub_type = content_type.split('/', 1)\r\n if main_type == 'text':\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEText(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'image':\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEImage(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'audio':\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n else:\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEBase(main_type, sub_type)\r\n msg.set_payload(fp.read())\r\n fp.close()\r\n filename = os.path.basename(attachmentFile)\r\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\r\n message.attach(msg)\r\n\r\n return {'raw': base64.urlsafe_b64encode(message.as_string())}", "def get_all_messages(filenames):\n messages = []\n for filename in filenames:\n with open(filename, \"r\") as fi:\n soup = BeautifulSoup(fi, 'html.parser')\n for item in soup.find_all(\"div\", class_=MESSAGE_CLASS):\n 
message = FacebookMessage(item)\n if not message.is_valid():\n continue\n if message.is_special_message():\n continue\n messages.append(message)\n return messages", "def createMessageWithAttachment(\r\n sender, to, subject, msgHtml, msgPlain, attachmentFile):\r\n message = MIMEMultipart('mixed')\r\n message['to'] = to\r\n message['from'] = sender\r\n message['subject'] = subject\r\n\r\n messageA = MIMEMultipart('alternative')\r\n messageR = MIMEMultipart('related')\r\n\r\n messageR.attach(MIMEText(msgHtml, 'html'))\r\n messageA.attach(MIMEText(msgPlain, 'plain'))\r\n messageA.attach(messageR)\r\n\r\n message.attach(messageA)\r\n\r\n print(\"create_message_with_attachment: file: %s\" % attachmentFile)\r\n content_type, encoding = mimetypes.guess_type(attachmentFile)\r\n\r\n if content_type is None or encoding is not None:\r\n content_type = 'application/octet-stream'\r\n main_type, sub_type = content_type.split('/', 1)\r\n if main_type == 'text':\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEText(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'image':\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEImage(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'audio':\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n else:\r\n fp = open(attachmentFile, 'rb')\r\n msg = MIMEBase(main_type, sub_type)\r\n msg.set_payload(fp.read())\r\n fp.close()\r\n filename = os.path.basename(attachmentFile)\r\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\r\n message.attach(msg)\r\n\r\n return {'raw': base64.urlsafe_b64encode((message.as_string()).encode('UTF-8')).decode('ascii')}", "def get_plaintext_parts(cls, msg):\n\n if msg.is_multipart():\n for payload in msg.get_payload():\n yield from cls.get_plaintext_parts(payload)\n else:\n if msg.get_content_type() == \"text/plain\":\n yield msg", "def test_fetchBodyStructureMultipart(self, uid=0):\n self.function = self.client.fetchBodyStructure\n self.messages = '3:9,10:*'\n innerMessage = FakeyMessage({\n 'content-type': 'text/plain; name=thing; key=\"value\"',\n 'content-id': 'this-is-the-content-id',\n 'content-description': 'describing-the-content-goes-here!',\n 'content-transfer-encoding': '8BIT',\n 'content-language': 'fr',\n 'content-md5': '123456abcdef',\n 'content-disposition': 'inline',\n 'content-location': 'outer space',\n }, (), b'', b'Body\\nText\\nGoes\\nHere\\n', 919293, None)\n self.msgObjs = [FakeyMessage({\n 'content-type': 'multipart/mixed; boundary=\"xyz\"',\n 'content-language': 'en',\n 'content-location': 'nearby',\n }, (), b'', b'', 919293, [innerMessage])]\n self.expected = {0: {'BODYSTRUCTURE': [\n ['text', 'plain', ['key', 'value', 'name', 'thing'],\n 'this-is-the-content-id', 'describing-the-content-goes-here!',\n '8BIT', '20', '4', '123456abcdef', ['inline', None], 'fr',\n 'outer space'],\n 'mixed', ['boundary', 'xyz'], None, 'en', 'nearby'\n ]}}\n return self._fetchWork(uid)", "def mime(self):\n author = self.author\n sender = self.sender\n if not author and sender:\n msg = 'Please specify the author using the \"author\" property. 
' + \\\n 'Using \"sender\" for the From header is deprecated!'\n warnings.warn(msg, category=DeprecationWarning)\n author = sender\n sender = []\n if not author:\n raise ValueError('You must specify an author.')\n \n assert self.subject, \"You must specify a subject.\"\n assert len(self.recipients) > 0, \"You must specify at least one recipient.\"\n assert self.plain, \"You must provide plain text content.\"\n \n if len(author) > 1 and len(sender) == 0:\n raise ValueError('If there are multiple authors of message, you must specify a sender!')\n if len(sender) > 1:\n raise ValueError('You must not specify more than one sender!')\n \n if not self._dirty and self._processed and not interface.config.get(\"mail.debug\", False):\n return self._mime\n \n self._processed = False\n \n plain = MIMEText(self._callable(self.plain).encode(self.encoding), 'plain', self.encoding)\n \n rich = None\n if self.rich:\n rich = MIMEText(self._callable(self.rich).encode(self.encoding), 'html', self.encoding)\n \n message = self.mime_document(plain, rich)\n headers = self._build_header_list(author, sender)\n self._add_headers_to_message(message, headers)\n \n self._mime = message\n self._processed = True\n self._dirty = False\n \n return message", "def parse_attachment(message_part):\n content_disposition = message_part.get(\"Content-Disposition\", None)\n if content_disposition:\n dispositions = content_disposition.strip().split(\";\")\n if bool(content_disposition and\n dispositions[0].lower() == \"attachment\"):\n\n file_data = message_part.get_payload(decode=True)\n attachment = StringIO(file_data)\n attachment.content_type = message_part.get_content_type()\n attachment.size = len(file_data)\n attachment.name = None\n attachment.create_date = None\n attachment.mod_date = None\n attachment.read_date = None\n\n for param in dispositions[1:]:\n name, value = param.split(\"=\")\n name = name.lower()\n\n if name == \"filename\":\n attachment.name = value\n elif name == \"create-date\":\n attachment.create_date = value # TODO: datetime\n elif name == \"modification-date\":\n attachment.mod_date = value # TODO: datetime\n elif name == \"read-date\":\n attachment.read_date = value # TODO: datetime\n return attachment\n # no attachment\n return None", "def get_body(message):\n\n if message.is_multipart():\n # get the html text version only\n text_parts = [part\n for part in typed_subpart_iterator(message,\n 'text',\n 'html')]\n body = []\n for part in text_parts:\n charset = get_charset(part, get_charset(message))\n body.append(unicode(part.get_payload(decode=True),\n charset,\n \"replace\"))\n\n return u\"\\n\".join(body).strip()\n else:\n # if it is not multipart, the payload will be a string\n # representing the message body\n body = unicode(message.get_payload(decode=True),\n get_charset(message),\n \"replace\")\n return body.strip()", "def from_message_content(\n cls, content: bytes, content_type: str, **kwargs: Any\n ) -> \"MessageType\":", "def CreateMessageWithAttachment(sender, to, subject, message_text, file_dir,\n filename):\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n msg = MIMEText(message_text)\n message.attach(msg)\n\n path = os.path.join(file_dir, filename)\n content_type, encoding = mimetypes.guess_type(path)\n\n main_type, sub_type = content_type.split('/', 1)\n fp = open(path, 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n 
encoders.encode_base64(msg)\n\n fp.close()\n\n message.attach(msg)\n\n return {'raw': base64.urlsafe_b64encode(bytes(message.as_string(), encoding='utf-8')).decode()}", "def message_to_objects(message):\n doc = etree.fromstring(message)\n if doc[0].tag == \"post\":\n # Skip the top <post> element if it exists\n doc = doc[0]\n entities = element_to_objects(doc)\n return entities", "def createMessage(self, sender: str, to: str, subject: str, message_text: str):\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n raw_message = {'raw': base64.urlsafe_b64encode(message.as_bytes())}\n raw_message['raw']=raw_message['raw'].decode('utf-8')\n return raw_message", "def CreateMessageWithAttachment(\n sender, to, subject, message_text, file_dir, filename):\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n msg = MIMEText(message_text)\n message.attach(msg)\n\n path = os.path.join(file_dir, filename)\n content_type, encoding = mimetypes.guess_type(path)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n if main_type == 'text':\n fp = open(path, 'rb')\n msg = MIMEText(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'image':\n fp = open(path, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'audio':\n fp = open(path, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\n fp.close()\n else:\n fp = open(path, 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n fp.close()\n\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n return {'raw': base64.urlsafe_b64encode(message.as_string())}", "def fetch(self, start, stop=None, mbox=None):\n self.select_mailbox(mbox, False)\n if start and stop:\n submessages = self.messages[start - 1:stop]\n mrange = \",\".join(submessages)\n else:\n submessages = [start]\n mrange = start\n headers = \"DATE FROM TO CC SUBJECT\"\n query = (\n \"(FLAGS BODYSTRUCTURE RFC822.SIZE BODY.PEEK[HEADER.FIELDS ({})])\"\n .format(headers)\n )\n data = self._cmd(\"FETCH\", mrange, query)\n result = []\n for uid in submessages:\n msg_data = data[int(uid)]\n msg = email.message_from_string(\n msg_data[\"BODY[HEADER.FIELDS ({})]\".format(headers)]\n )\n msg[\"imapid\"] = uid\n msg[\"size\"] = msg_data[\"RFC822.SIZE\"]\n if r\"\\Seen\" not in msg_data[\"FLAGS\"]:\n msg[\"style\"] = \"unseen\"\n if r\"\\Answered\" in msg_data[\"FLAGS\"]:\n msg[\"answered\"] = True\n if r\"$Forwarded\" in msg_data[\"FLAGS\"]:\n msg[\"forwarded\"] = True\n if r\"\\Flagged\" in msg_data[\"FLAGS\"]:\n msg[\"flagged\"] = True\n bstruct = BodyStructure(msg_data[\"BODYSTRUCTURE\"])\n if bstruct.has_attachments():\n msg[\"attachments\"] = True\n result += [msg]\n return result", "def test_message_attachments():", "def msg_to_dict(msg):\n # FIXME: any repeated header will be ignored\n # Usually it is only 'Received' header\n d = {}\n\n if msg.text_part:\n body = msg.text_part.get_payload()\n charset = msg.text_part.charset\n else:\n body = msg.get_payload()\n charset = msg.get_charset()\n if charset:\n charset = charset.lower()\n i = charset.find('iso')\n u = charset.find('utf')\n if i > 0:\n charset = charset[i:]\n elif u > 0:\n charset = charset[u:]\n # Some old emails say it's ascii or unkown but in reality is not\n # not use any charset not iso or utf\n elif i != 0 and u != 0:\n 
charset = None\n\n for header in msg.keys():\n value = msg.get_decoded_header(header)\n value, _ = pyzmail.decode_text(value, charset, None)\n value = value.encode('UTF-8')\n header = header.replace('.', ',') # mongoDB don't likes '.' on keys\n d[header] = value\n\n attach = []\n if type(body) == str:\n body, _ = pyzmail.decode_text(body, charset, None)\n body = body.encode('UTF-8')\n # On attachments of emails sometimes it end up with a list of email.message\n elif type(body) == list:\n for part in body:\n zmail = pyzmail.PyzMessage(part)\n a = msg_to_dict(zmail)\n attach.append(a)\n body = attach[0]['Body']\n d['Body'] = body\n\n if len(msg.mailparts) > 1:\n for mailpart in msg.mailparts:\n zmail = pyzmail.PyzMessage(mailpart.part)\n a = msg_to_dict(zmail)\n attach.append(a)\n\n if attach:\n d['Attachments'] = attach\n\n return d", "def test_multiPartNoQuotes(self):\n outerBody = b''\n innerBody = b'Contained body message text. Squarge.'\n headers = OrderedDict()\n headers['from'] = 'sender@host'\n headers['to'] = 'recipient@domain'\n headers['subject'] = 'booga booga boo'\n headers['content-type'] = 'multipart/alternative; boundary=xyz'\n\n innerHeaders = OrderedDict()\n innerHeaders['subject'] = 'this is subject text'\n innerHeaders['content-type'] = 'text/plain'\n msg = FakeyMessage(headers, (), None, outerBody, 123,\n [FakeyMessage(innerHeaders, (), None, innerBody,\n None, None)],\n )\n\n c = BufferingConsumer()\n p = imap4.MessageProducer(msg)\n d = p.beginProducing(c)\n\n def cbProduced(result):\n self.failUnlessIdentical(result, p)\n self.assertEqual(\n b''.join(c.buffer),\n\n b'{237}\\r\\n'\n b'From: sender@host\\r\\n'\n b'To: recipient@domain\\r\\n'\n b'Subject: booga booga boo\\r\\n'\n b'Content-Type: multipart/alternative; boundary='\n b'xyz'\n b'\\r\\n'\n b'\\r\\n'\n b'\\r\\n'\n b'--xyz\\r\\n'\n b'Subject: this is subject text\\r\\n'\n b'Content-Type: text/plain\\r\\n'\n b'\\r\\n'\n + innerBody\n + b'\\r\\n--xyz--\\r\\n')\n\n return d.addCallback(cbProduced)", "def get_clean_text(message_list):\n return [\" \".join([str(line).strip('b').strip(\"'\") for line in msg]) for msg in message_list]", "def prepare_email(\n sender: Address | str,\n reply_to: Address | str | None = None,\n receivers: 'SequenceOrScalar[Address | str]' = (),\n cc: 'SequenceOrScalar[Address | str]' = (),\n bcc: 'SequenceOrScalar[Address | str]' = (),\n subject: str | None = None,\n content: str | None = None,\n plaintext: str | None = None,\n attachments: 'Iterable[Attachment | StrPath]' = (),\n headers: dict[str, str] | None = None,\n stream: str = 'marketing'\n) -> 'EmailJsonDict':\n\n if plaintext is None:\n # if no plaintext is given we require content\n # FIXME: it would be nice to verify this statically, but the\n # order of arguments makes this a bit cumbersome, we\n # could remedy this by forcing them all to be keyword\n # arguments\n assert content is not None\n\n # turn the html email into a plaintext representation\n # this leads to a lower spam rating\n plaintext = html_to_text(content)\n\n message: 'EmailJsonDict' = {\n 'From': format_single_address(sender),\n 'To': format_address(receivers),\n 'TextBody': plaintext,\n 'MessageStream': stream,\n }\n\n if reply_to is not None:\n # we require address objects so we can modify them\n sender = coerce_address(sender)\n reply_to = coerce_address(reply_to)\n message['ReplyTo'] = format_single_address(reply_to)\n\n # if the reply to address has a name part (Name <address@host>), use\n # the display_name for the sender address as well to somewhat hide 
the\n # fact that we're using a noreply email\n if reply_to.display_name and not sender.display_name:\n sender = Address(\n reply_to.display_name,\n sender.username,\n sender.domain\n )\n message['From'] = format_single_address(sender)\n\n if cc:\n message['Cc'] = format_address(cc)\n\n if bcc:\n message['Bcc'] = format_address(bcc)\n\n if subject is not None:\n message['Subject'] = subject\n\n if content is not None:\n message['HtmlBody'] = content\n\n if attachments:\n coerced_attachments = (\n a if isinstance(a, Attachment) else Attachment(a)\n for a in attachments\n )\n message['Attachments'] = [a.prepare() for a in coerced_attachments]\n\n if headers:\n message['Headers'] = [\n {'Name': k, 'Value': v} for k, v in headers.items()\n ]\n\n return message", "def process(mlist, msg, msgdata):\n # Digests and Mailman-craft messages should not get additional headers.\n if msgdata.get('isdigest') or msgdata.get('nodecorate'):\n return\n d = {}\n member = msgdata.get('member')\n if member is not None:\n # Calculate the extra personalization dictionary.\n recipient = msgdata.get('recipient', member.address.original_email)\n d['member'] = formataddr(\n (member.subscriber.display_name, member.subscriber.email))\n d['user_email'] = recipient\n d['user_delivered_to'] = member.address.original_email\n d['user_language'] = member.preferred_language.description\n d['user_name'] = member.display_name\n # For backward compatibility.\n d['user_address'] = recipient\n # Calculate the archiver permalink substitution variables. This provides\n # the $<archive-name>_url placeholder for every enabled archiver.\n for archiver in IListArchiverSet(mlist).archivers:\n if archiver.is_enabled:\n # Get the permalink of the message from the archiver. Watch out\n # for exceptions in the archiver plugin.\n try:\n archive_url = archiver.system_archiver.permalink(mlist, msg)\n except Exception:\n alog.exception('Exception in \"{}\" archiver'.format(\n archiver.system_archiver.name))\n archive_url = None\n if archive_url is not None:\n placeholder = '{}_url'.format(archiver.system_archiver.name)\n d[placeholder] = archive_url\n # These strings are descriptive for the log file and shouldn't be i18n'd\n d.update(msgdata.get('decoration-data', {}))\n header = decorate('list:member:regular:header', mlist, d)\n footer = decorate('list:member:regular:footer', mlist, d)\n # Escape hatch if both the footer and header are empty or None.\n if len(header) == 0 and len(footer) == 0:\n return\n # Be MIME smart here. We only attach the header and footer by\n # concatenation when the message is a non-multipart of type text/plain.\n # Otherwise, if it is not a multipart, we make it a multipart, and then we\n # add the header and footer as text/plain parts.\n #\n # BJG: In addition, only add the footer if the message's character set\n # matches the charset of the list's preferred language. This is a\n # suboptimal solution, and should be solved by allowing a list to have\n # multiple headers/footers, for each language the list supports.\n #\n # Also, if the list's preferred charset is us-ascii, we can always\n # safely add the header/footer to a plain text message since all\n # charsets Mailman supports are strict supersets of us-ascii --\n # no, UTF-16 emails are not supported yet.\n #\n # TK: Message with 'charset=' cause trouble. 
So, instead of\n # mgs.get_content_charset('us-ascii') ...\n mcset = msg.get_content_charset() or 'us-ascii'\n lcset = mlist.preferred_language.charset\n msgtype = msg.get_content_type()\n # BAW: If the charsets don't match, should we add the header and footer by\n # MIME multipart chroming the message?\n wrap = True\n if not msg.is_multipart() and msgtype == 'text/plain':\n # Save the RFC-3676 format parameters.\n format_param = msg.get_param('format')\n delsp = msg.get_param('delsp')\n # Save 'Content-Transfer-Encoding' header in case decoration fails.\n cte = msg.get('content-transfer-encoding')\n # header/footer is now in unicode.\n try:\n oldpayload = msg.get_payload(decode=True).decode(mcset)\n del msg['content-transfer-encoding']\n frontsep = endsep = ''\n if len(header) > 0 and not header.endswith('\\n'):\n frontsep = '\\n'\n if len(footer) > 0 and not oldpayload.endswith('\\n'):\n endsep = '\\n'\n payload = header + frontsep + oldpayload + endsep + footer\n # When setting the payload for the message, try various charset\n # encodings until one does not produce a UnicodeError. We'll try\n # charsets in this order: the list's charset, the message's\n # charset, then utf-8. It's okay if some of these are duplicates.\n for cset in (lcset, mcset, 'utf-8'):\n try:\n msg.set_payload(payload.encode(cset), cset)\n except UnicodeError:\n pass\n else:\n if format_param:\n msg.set_param('format', format_param)\n if delsp:\n msg.set_param('delsp', delsp)\n wrap = False\n break\n except (LookupError, UnicodeError):\n if cte:\n # Restore the original c-t-e.\n del msg['content-transfer-encoding']\n msg['Content-Transfer-Encoding'] = cte\n elif msg.get_content_type() == 'multipart/mixed':\n # The next easiest thing to do is just prepend the header and append\n # the footer as additional subparts\n payload = msg.get_payload()\n if not isinstance(payload, list):\n payload = [payload]\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n msg.set_payload(payload)\n wrap = False\n # If we couldn't add the header or footer in a less intrusive way, we can\n # at least do it by MIME encapsulation. We want to keep as much of the\n # outer chrome as possible.\n if not wrap:\n return\n # Because of the way Message objects are passed around to process(), we\n # need to play tricks with the outer message -- i.e. the outer one must\n # remain the same instance. So we're going to create a clone of the outer\n # message, with all the header chrome intact, then copy the payload to it.\n # This will give us a clone of the original message, and it will form the\n # basis of the interior, wrapped Message.\n inner = Message()\n # Which headers to copy? Let's just do the Content-* headers\n for h, v in msg.items():\n if h.lower().startswith('content-'):\n inner[h] = v\n inner.set_payload(msg.get_payload())\n # For completeness\n inner.set_unixfrom(msg.get_unixfrom())\n inner.preamble = msg.preamble\n inner.epilogue = msg.epilogue\n # Don't copy get_charset, as this might be None, even if\n # get_content_charset isn't. 
However, do make sure there is a default\n # content-type, even if the original message was not MIME.\n inner.set_default_type(msg.get_default_type())\n # BAW: HACK ALERT.\n if hasattr(msg, '__version__'):\n inner.__version__ = msg.__version__\n # Now, play games with the outer message to make it contain three\n # subparts: the header (if any), the wrapped message, and the footer (if\n # any).\n payload = [inner]\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n msg.set_payload(payload)\n del msg['content-type']\n del msg['content-transfer-encoding']\n del msg['content-disposition']\n msg['Content-Type'] = 'multipart/mixed'", "def body_parts(self):\n return_vals = {'files': []}\n\n for part in self.email.walk():\n maintype, subtype = part.get_content_type().split('/')\n # Multipart/* are containers, so we skip it\n if maintype == 'multipart':\n continue\n # Get Text and HTML\n filename = part.get_filename()\n if filename:\n return_vals['files'].append(filename)\n elif maintype == 'text':\n if subtype in ['plain', 'html']:\n encoder = part.get_content_charset() or 'utf-8'\n return_vals.update(\n {subtype:part.get_payload(decode=True).decode(encoder)})\n return return_vals", "def attachments(self):\n for part in self.email.walk():\n filename = part.get_filename()\n if filename:\n yield {\n 'type': part.get_content_type(),\n 'name': filename,\n 'content': part.get_payload()\n }", "def GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_string(msg_str)\n data = {}\n data['to'] = mime_msg['To']\n data['from'] = mime_msg['From']\n data['date'] = mime_msg['Date']\n data['subject'] = mime_msg['Subject']\n data['message'] = \"\"\n return data\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def format_attachments(self, text, attachments):\n\n refs = re.findall( '\\[file-([1-9][0-9]*)\\]', text)\n if not refs:\n return text\n refs = sorted(set(refs))\n\n max_ref = attachments.count()\n if not max_ref:\n return text\n refs = [int(ref) for ref in refs if int(ref) <= max_ref]\n attachments = [a for a in attachments.order_by('pk')[0:max(refs)]]\n for ref in refs:\n text = text.replace('[file-%d]' % ref, attachments[ref-1].file.url)\n\n return text", "def decode_data(self, data):\n self._mms_message = message.MMSMessage()\n self._mms_data = data\n body_iter = self.decode_message_header()\n self.decode_message_body(body_iter)\n return self._mms_message", "def parse_message(message):\n request_iter = request_regex.finditer(message.body())\n requests = []\n for build_request in request_iter:\n requests.append(determine_request(build_request))\n if requests:\n results = serverset.build_request(requests)\n message.reply(build_reply(results))", "def create_message(sender, to, subject, cc, message_text):\n\n email = \"\"\n \n for c in message_text:\n email += c\n \n message = MIMEText(email)\n message['to'] = to\n message['cc'] = cc\n message['from'] = sender\n message['subject'] = subject\n return {'raw': base64.urlsafe_b64encode(message.as_string())}", "def parse_form_multipart(body):\n # type: (unicode) -> dict[unicode, Any]\n import email\n 
import json\n\n import xmltodict\n\n def parse_message(msg):\n if msg.is_multipart():\n res = {\n part.get_param(\"name\", failobj=part.get_filename(), header=\"content-disposition\"): parse_message(part)\n for part in msg.get_payload()\n }\n else:\n content_type = msg.get(\"Content-Type\")\n if content_type in (\"application/json\", \"text/json\"):\n res = json.loads(msg.get_payload())\n elif content_type in (\"application/xml\", \"text/xml\"):\n res = xmltodict.parse(msg.get_payload())\n elif content_type in (\"text/plain\", None):\n res = msg.get_payload()\n else:\n res = \"\"\n\n return res\n\n headers = _asm_request_context.get_headers()\n if headers is not None:\n content_type = headers.get(\"Content-Type\")\n msg = email.message_from_string(\"MIME-Version: 1.0\\nContent-Type: %s\\n%s\" % (content_type, body))\n return parse_message(msg)\n return {}", "def test_multiPartNoBoundary(self):\n outerBody = b''\n innerBody = b'Contained body message text. Squarge.'\n headers = OrderedDict()\n headers['from'] = 'sender@host'\n headers['to'] = 'recipient@domain'\n headers['subject'] = 'booga booga boo'\n headers['content-type'] = 'multipart/alternative'\n\n innerHeaders = OrderedDict()\n innerHeaders['subject'] = 'this is subject text'\n innerHeaders['content-type'] = 'text/plain'\n msg = FakeyMessage(headers, (), None, outerBody, 123,\n [FakeyMessage(innerHeaders, (), None, innerBody,\n None, None)],\n )\n\n c = BufferingConsumer()\n p = imap4.MessageProducer(msg)\n p._uuid4 = lambda: uuid.UUID('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')\n\n d = p.beginProducing(c)\n\n def cbProduced(result):\n self.failUnlessIdentical(result, p)\n self.assertEqual(\n b''.join(c.buffer),\n\n b'{341}\\r\\n'\n b'From: sender@host\\r\\n'\n b'To: recipient@domain\\r\\n'\n b'Subject: booga booga boo\\r\\n'\n b'Content-Type: multipart/alternative; boundary='\n b'\"----=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"'\n b'\\r\\n'\n b'\\r\\n'\n b'\\r\\n'\n b'------=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n'\n b'Subject: this is subject text\\r\\n'\n b'Content-Type: text/plain\\r\\n'\n b'\\r\\n'\n + innerBody\n + b'\\r\\n------=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--\\r\\n')\n\n return d.addCallback(cbProduced)", "def get_messages(character):\n mail = character.db.mail\n try:\n messages = [item for item in mail if item[TIMESTAMP] <= item[MESSAGE].date_sent]\n # Let's clean up mail storage for this user while we're at it.\n character.db.mail = messages\n except TypeError:\n messages = []\n return messages", "def _DecompressMessageList(\n packed_message_list: rdf_flows.PackedMessageList,\n) -> rdf_flows.MessageList:\n compression = packed_message_list.compression\n if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:\n data = packed_message_list.message_list\n\n elif compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION:\n try:\n data = zlib.decompress(packed_message_list.message_list)\n except zlib.error as e:\n raise RuntimeError(\"Failed to decompress: %s\" % e) from e\n else:\n raise RuntimeError(\"Compression scheme not supported\")\n\n try:\n result = rdf_flows.MessageList.FromSerializedBytes(data)\n except rdfvalue.DecodeError as e:\n raise RuntimeError(\"RDFValue parsing failed.\") from e\n\n return result", "def get_message(post_request):\n lst = ['stripped-text', 'stripped-signature']\n for value in lst:\n if value in post_request.POST:\n return post_request.POST.get(value)\n\n return post_request.POST.get('body-plain')", "def import_report_from_email(content):\n if isinstance(content, 
six.string_types):\n msg = email.message_from_string(content)\n elif isinstance(content, six.binary_type):\n msg = email.message_from_bytes(content)\n else:\n msg = email.message_from_file(content)\n err = False\n for part in msg.walk():\n if part.get_content_type() not in ZIP_CONTENT_TYPES:\n continue\n try:\n fpo = six.BytesIO(part.get_payload(decode=True))\n # Try to get the actual file type of the buffer\n # required to make sure we are dealing with an XML file\n file_type = magic.Magic(uncompress=True, mime=True).from_buffer(fpo.read(2048))\n fpo.seek(0)\n if file_type in FILE_TYPES:\n import_archive(fpo, content_type=part.get_content_type())\n except (OSError, IOError):\n print('Error: the attachment does not match the mimetype')\n err = True\n else:\n fpo.close()\n if err:\n # Return EX_DATAERR code <data format error> available\n # at sysexits.h file\n # (see http://www.postfix.org/pipe.8.html)\n sys.exit(65)", "def parse_inline_attachments(self, post_html):\n if 'inline-attachment' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_inline_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts", "def _serialize_event_messages(event):\n if event.content_type == MessagingEvent.CONTENT_EMAIL:\n return _get_messages_for_email(event)\n\n if event.content_type in (MessagingEvent.CONTENT_SMS, MessagingEvent.CONTENT_SMS_CALLBACK):\n return _get_messages_for_sms(event)\n\n if event.content_type in (MessagingEvent.CONTENT_SMS_SURVEY, MessagingEvent.CONTENT_IVR_SURVEY):\n return _get_messages_for_survey(event)\n return []", "def create_email(_from, _to, _subj, _body, files):\r\n msg = MIMEMultipart()\r\n msg['From'] = _from\r\n msg['To'] = _to\r\n msg['Subject'] = _subj\r\n msg.attach(MIMEText(_body, 'plain'))\r\n\r\n if files:\r\n for file in files:\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload(open(file, 'rb').read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', 'attachment; filename=' + os.path.basename(file))\r\n msg.attach(part)\r\n\r\n return msg", "def extractsms(htmlsms) :\r\n msgitems = []\t\t\t\t\t\t\t\t\t\t# accum message items here\r\n #\tExtract all conversations by searching for a DIV with an ID at top level.\r\n tree = BeautifulSoup.BeautifulSoup(htmlsms)\t\t\t# parse HTML into tree\r\n conversations = tree.findAll(\"div\",attrs={\"id\" : True},recursive=False)\r\n for conversation in conversations :\r\n #\tFor each conversation, extract each row, which is one SMS message.\r\n rows = conversation.findAll(attrs={\"class\" : \"gc-message-sms-row\"})\r\n for row in rows :\t\t\t\t\t\t\t\t# for all rows\r\n #\tFor each row, which is one message, extract all the fields.\r\n msgitem = {\"id\" : conversation[\"id\"]}\t\t# tag this message with conversation ID\r\n spans = row.findAll(\"span\",attrs={\"class\" : True}, recursive=False)\r\n for span in spans :\t\t\t\t\t\t\t# for all spans in row\r\n cl = span[\"class\"].replace('gc-message-sms-', '')\r\n msgitem[cl] = (\" \".join(span.findAll(text=True))).strip()\t# put text in dict\r\n msgitems.append(msgitem)\t\t\t\t\t# add msg dictionary to 
list\r\n return msgitems", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def getBody(self, mail_message):\n bodies = mail_message.bodies()\n for content_type, body in bodies:\n return body.decode().encode(\"utf-8\")", "def imageparts(msg):\n # Don't want a set here because we want to be able to process them in\n # order.\n return filter(lambda part:\n part.get_content_type().startswith('image/'),\n msg.walk())", "def transform(self, email_path):\n mail = open(email_path, 'r')\n content = mail.read(self.max_read_len)\n i = 0\n while not(content[i] == '\\n' and content[i + 1] == '\\n') and i < len(content) - self.ngram:\n i += 1\n header = content[:i]\n # TODO find a smarter way deal with the header-body problem\n body = content[i + 2:]\n if len(body) + len(header) > self.max_read_len:\n body = body[:max(1000, self.max_read_len - len(header))]\n header_set = self.tokenize(header)\n body_set = self.tokenize(body)\n mail.close()\n return (header_set, body_set)", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def create_message(sender, to, subject, message_text, image_directory=None):\n message = MIMEMultipart()\n message['to'] = to if isinstance(to, str) else ','.join(to)\n if sender:\n message['from'] = sender\n message['subject'] = subject\n\n msg = MIMEText(message_text, 'html')\n message.attach(msg)\n\n if image_directory:\n for image_name in os.listdir(image_directory):\n with open(os.path.join(image_directory, image_name), 'rb') as img_data:\n content_type, encoding = mimetypes.guess_type(image_name)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n if main_type == 'image':\n msg = MIMEImage(img_data.read(), _subtype=sub_type)\n else:\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(img_data.read())\n\n msg.add_header('Content-Id', '<' + image_name.split('.')[0] + '>') # angle brackets are important\n message.attach(msg)\n\n # https://www.pronoy.in/2016/10/20/python-3-5-x-base64-encoding-3/\n return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode('ascii')}", "def create_message(sender, to, subject, message_text_html, message_text_plain):\r\n message = MIMEMultipart('alternative')\r\n message['to'] = to\r\n message['from'] = sender\r\n message['subject'] = subject\r\n message_html = MIMEText(message_text_html, 'html') # HTML version\r\n message_plain = MIMEText(message_text_plain) # plain text 
version\r\n message.attach(message_plain)\r\n message.attach(message_html)\r\n return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}", "def get_attachments(service, user_id, msg_id, save_path):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n if 'parts' not in message['payload']:\n if message['payload']['body']['size'] > 0:\n print(\"Downloading single-part attachment...\")\n file_data = base64.urlsafe_b64decode(message['payload']['body']['data'].encode('UTF-8'))\n path = ''.join([save_path, sanitize_string(message['snippet'][0:70])])\n write_file_to_location(file_data, path)\n elif 'parts' in message['payload']:\n for part in message['payload']['parts']:\n print(\"Downloading multi-part attachment...\")\n if part['filename']:\n data = get_data_from_part(service, user_id, msg_id, part)\n file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))\n path = ''.join([save_path, part['filename']])\n write_file_to_location(file_data, path)\n # Nothing to download\n else:\n return None\n\n except errors.HttpError as error:\n print(f\"An error occurred: {error}\")\n\n return msg_id", "def CreateMessage(sender, to, subject, message_text):\n message = MIMEText(message_text, 'html')\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}" ]
[ "0.6099892", "0.6005864", "0.59787667", "0.59406656", "0.5869029", "0.5850756", "0.5808466", "0.57489055", "0.5714828", "0.5676952", "0.5646985", "0.5642732", "0.56374663", "0.56352216", "0.5632284", "0.56273305", "0.56104976", "0.5606248", "0.55780625", "0.55734146", "0.5572651", "0.5569366", "0.55318105", "0.5527631", "0.55264026", "0.5511944", "0.5511208", "0.5511145", "0.54915875", "0.54813534", "0.54507744", "0.5424767", "0.5424696", "0.5406812", "0.5403686", "0.5385209", "0.53752303", "0.5368227", "0.53666687", "0.53606635", "0.5340565", "0.5339472", "0.53324634", "0.53248763", "0.5320731", "0.52977914", "0.5258983", "0.5250897", "0.5246073", "0.5243607", "0.5237938", "0.5224904", "0.5223512", "0.5222845", "0.5209592", "0.5195979", "0.5172956", "0.5165836", "0.5161032", "0.5160018", "0.5159428", "0.5142915", "0.5124342", "0.51219565", "0.5120416", "0.5106521", "0.5097909", "0.5095448", "0.509329", "0.50833434", "0.5082212", "0.5080686", "0.50797343", "0.5078147", "0.50760716", "0.50709534", "0.5068067", "0.50481546", "0.50357956", "0.502161", "0.50187755", "0.501311", "0.50107664", "0.50062096", "0.50032896", "0.5002143", "0.49894896", "0.49876416", "0.49787298", "0.49673817", "0.4960337", "0.49593094", "0.49546316", "0.49522913", "0.49439737", "0.49432465", "0.49431083", "0.49378404", "0.49325854", "0.49295542" ]
0.6649057
0
Coerce an unknown date to the given timezone, then to UTC
def normalize_to_utc(date, timezone):
    local_tz = pytz.timezone(timezone)
    new_date = date.replace(tzinfo = local_tz)
    utc_tz = pytz.timezone('UTC')
    new_date = new_date.astimezone(utc_tz)
    return new_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toutc(dateobj, timezone):\n fmtdate = parser.parse(dateobj) # string to datetime object\n user_tz = pytz.timezone(timezone) # getting user's timezone\n localize_date_with_tz = user_tz.localize(fmtdate) #adding user's timezone to datetime object\n utcdate = pytz.utc.normalize(localize_date_with_tz) #converting user's datetime to utc datetime\n return utcdate", "def date_to_utc(self, date):\n if date.tzinfo is not None:\n # date is timezone-aware\n date = date.astimezone(self.tz_utc)\n\n else:\n # date is a naive date: assume expressed in local time\n date = date.replace(tzinfo=self.tz_local)\n # and converted to UTC\n date = date.astimezone(self.tz_utc)\n return date", "def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)", "def to_utc(dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=pytz.utc)\n else:\n return dt.astimezone(pytz.utc)", "def resolved_at_to_utc(user_time, user_tz):\n if isinstance(user_tz, str):\n user_tz = dateutil.tz.gettz(user_tz)\n\n localized_time = user_time.replace(tzinfo=user_tz)\n return localized_time.to(\"UTC\").datetime", "def date_to_utc(date):\n utc_date = date_convert_to_timezone(date, 'utc')\n return utc_date", "def set_utc(date_time):\n utc = datetime.timezone(datetime.timedelta(0))\n date_time = date_time.replace(tzinfo=utc)\n return date_time", "def set_utc(df, locale):\n return df.tz_localize('utc').tz_convert(None)", "def date_from_utc(date):\n return pytz.utc.localize(date)", "def tz_as_utc(dt: datetime) -> datetime:\n if dt.tzinfo is None:\n return dt.replace(tzinfo=tzutc())\n return dt.astimezone(tzutc())", "def convert_timezone(dt: datetime, tz_info: tzinfo = UTC) -> datetime:\n if dt.tzinfo is None:\n raise ValueError(f\"{dt} is a naive datetime\")\n return dt.astimezone(tz_info)", "def datetime_tz_to_utc(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n return dt.replace(tzinfo=dt.tzinfo).astimezone(tz=datetime.timezone.utc)\n else:\n raise ValueError(f\"dt does not have timezone info\")", "def datetime_naive_to_utc(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n # has timezone info\n raise ValueError(\n \"dt must be naive/timezone unaware: \"\n f\"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}\"\n )\n\n return dt.replace(tzinfo=datetime.timezone.utc)", "def tz_localize(self, dt):\n if is_datetime(dt):\n # Naive datetime, see\n # https://docs.python.org/3/library/datetime.html#available-types\n if dt.tzinfo == None or dt.tzinfo.utcoffset(dt) == None:\n return dt\n else:\n return dt.astimezone(self.args.tz)\n elif is_date(dt):\n return dt\n else:\n raise ValueError('Expected datetime or date object')", "def date_convert_to_timezone(date, timezone_string):\n # if the given date does not have a timezone, use the system's timezone\n if date.tzinfo is None:\n date = date_make_timezone_aware(date)\n\n timezone_object = pytz_timezone_object(timezone_string)\n converted_date = date.astimezone(timezone_object)\n return converted_date", "def localize_utc(value):\n if isinstance(value, datetime):\n return value.replace(tzinfo=tzutc()).astimezone(tzlocal())\n else:\n return value", "def _parse_date(s):\n return parse(s).astimezone(pytz.utc)", "def date_to_timezone(date: dt.date | None) -> 
dt.datetime | None:\n\n if not date:\n return None\n\n return dt.datetime.combine(date, dt.time.min, tzinfo=dt.timezone.utc)", "def tolocal(dateobj, timezone):\n \n utc_date_with_tz = pytz.utc.localize(dateobj) # \n user_tz = pytz.timezone(timezone)\n localdate = user_tz.normalize(utc_date_with_tz) \n \n return localdate", "def to_datetime_utc(obj: Union[None, pendulum.DateTime, str]) -> Union[pendulum.DateTime, None]:\n\n if isinstance(obj, pendulum.DateTime):\n return obj.in_tz(tz=\"UTC\")\n elif isinstance(obj, str):\n dt = pendulum.parse(obj)\n return dt.in_tz(tz=\"UTC\")\n elif obj is None:\n return None\n\n raise ValueError(\"body should be None or pendulum.DateTime\")", "def localize_datetime_utc(date_time):\n return pytz.utc.localize(date_time)", "def localize_datetime_utc(date_time):\n return pytz.utc.localize(date_time)", "def localize_datetime_utc(date_time):\n return pytz.utc.localize(date_time)", "def localize_date_utc(date):\n return pytz.utc.localize(\n datetime.datetime.combine(\n date, datetime.time(hour=0, minute=0, second=0)\n )\n )", "def localize_date_utc(date):\n return pytz.utc.localize(\n datetime.datetime.combine(\n date, datetime.time(hour=0, minute=0, second=0)\n )\n )", "def insure_localisation(dt, timezone=str(get_localzone())):\n\n if isinstance(dt, datetime):\n tz = pytz.timezone(timezone)\n if dt.tzinfo is None:\n dt = tz.localize(dt)\n return dt\n elif isinstance(dt, date):\n return dt\n else:\n raise TypeError('\"date\" or \"datetime\" object expected, not {!r}.'.format(dt.__class__.__name__))", "def convert_date(self, date_str):\n\t\tdate_obj = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')\n\t\tdate_obj = date_obj.replace(tzinfo=pytz.timezone('UTC'))\n\t\treturn date_obj.astimezone(pytz.timezone(self.time_zone))", "def datetime_remove_tz(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n return dt.replace(tzinfo=None)", "def _dates_to_naive_utc(date_objects):\n\n if len(date_objects) == 0:\n return []\n naive_dates = []\n for date in date_objects:\n if date.tzinfo is not None and date.tzinfo.utcoffset(date) is not None:\n date = date.astimezone(pytz.utc)\n naive_dates.append(date.replace(tzinfo=None))\n return naive_dates", "def ensure_utc_time(ts: datetime) -> datetime:\n if ts.tzinfo is None:\n return datetime(*ts.timetuple()[:6], tzinfo=UTC_TZ)\n elif str(ts.tzinfo) != str(UTC_TZ):\n return ts.astimezone(UTC_TZ)\n return ts", "def convert_timezone(date_time, tz=pytz.timezone('UTC'), datetime_format=app_constant.DATE_TIME_ZONE_FORMAT):\n date_time = _get_date_time(date_time=date_time, datetime_format=datetime_format)\n return date_time.astimezone(tz)", "async def test_process_timestamp_to_utc_isoformat() -> None:\n datetime_with_tzinfo = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC)\n datetime_without_tzinfo = datetime(2016, 7, 9, 11, 0, 0)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n nst = dt_util.get_time_zone(\"Canada/Newfoundland\")\n datetime_nst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=nst)\n hst = dt_util.get_time_zone(\"US/Hawaii\")\n datetime_hst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=hst)\n\n assert (\n process_timestamp_to_utc_isoformat(datetime_with_tzinfo)\n == \"2016-07-09T11:00:00+00:00\"\n )\n assert (\n 
process_timestamp_to_utc_isoformat(datetime_without_tzinfo)\n == \"2016-07-09T11:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_est_timezone)\n == \"2016-07-09T15:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_nst_timezone)\n == \"2016-07-09T13:30:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_hst_timezone)\n == \"2016-07-09T21:00:00+00:00\"\n )\n assert process_timestamp_to_utc_isoformat(None) is None", "def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)", "def dt_tz_convert(self, tz):\n return DateTimeDefault.register(pandas.Series.dt.tz_convert)(self, tz)", "def getutc(value):\n # arrow/dateutil don't support non-unique DST shorthand, so replace those tz strings\n if value.find('EDT') > -1:\n value = value.replace('EDT', 'EST5EDT')\n elif value.find('CDT') > -1:\n value = value.replace('CDT', 'CST6CDT')\n elif value.find('MDT') > -1:\n value = value.replace('MDT', 'MST7MDT')\n elif value.find('PDT') > -1:\n value = value.replace('PDT', 'PST8PDT')\n return arrow.get(value, 'hmm A ZZZ ddd MMM DD YYYY').to('UTC')", "def localize(dt):\n if dt.tzinfo is UTC:\n return (dt + LOCAL_UTC_OFFSET).replace(tzinfo=None)\n # No TZ info so not going to assume anything, return as-is.\n return dt", "def localtime2utc(date):\n return date + (datetime.utcnow() - datetime.now())", "def aware_utc_from_timestamp(timestamp):\n return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.utc)", "def convert_utc(utc) -> dt.datetime:\n return iso8601.parse_date(utc)", "def datetime_to_utc(dt):\n if dt.tzinfo:\n # Reduce [24, 48) hour offsets.\n if dt.tzinfo._offset >= datetime.timedelta(1):\n dt.tzinfo._offset -= datetime.timedelta(1)\n dt += datetime.timedelta(1)\n elif dt.tzinfo._offset <= datetime.timedelta(-1):\n dt.tzinfo._offset += datetime.timedelta(1)\n dt -= datetime.timedelta(1)\n return datetime.datetime(*dt.utctimetuple()[:6])", "def timezonize(tz):\n \n # Checking if somthing is a valid pytz object is hard as it seems that they are spread around the pytz package.\n #\n # Option 1): Try to convert if string or unicode, otherwise try to instantiate a datetieme object decorated\n # with the timezone in order to check if it is a valid one. \n #\n # Option 2): Get all members of the pytz package and check for type, see\n # http://stackoverflow.com/questions/14570802/python-check-if-object-is-instance-of-any-class-from-a-certain-module\n #\n # Option 3) perform a hand-made test. 
We go for this one, tests would fail if something changes in this approach.\n \n if not 'pytz' in str(type(tz)):\n tz = pytz.timezone(tz)\n \n return tz", "def set_date_tzinfo(d, tz_name=None):\r\n if tz_name and not d.tzinfo:\r\n tz = pytz.timezone(tz_name)\r\n return tz.localize(d)\r\n return d", "def _get_tz():\n return 'UTC'", "def fromutc(self, dt):\n if not isinstance(dt, real_datetime):\n raise TypeError(\"fromutc() requires a datetime argument\")\n if dt.tzinfo is not self:\n raise ValueError(\"dt.tzinfo is not self\")\n\n dtoff = dt.utcoffset()\n if dtoff is None:\n raise ValueError(\"fromutc() requires a non-None utcoffset() \" \"result\")\n\n # See the long comment block at the end of this file for an\n # explanation of this algorithm.\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\"fromutc() requires a non-None dst() result\")\n delta = dtoff - dtdst\n if delta:\n dt += delta\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\n \"fromutc(): dt.dst gave inconsistent \" \"results; cannot convert\"\n )\n return dt + dtdst", "def dt_tz_localize(self, tz, ambiguous=\"raise\", nonexistent=\"raise\"):\n return DateTimeDefault.register(pandas.Series.dt.tz_localize)(\n self, tz, ambiguous, nonexistent\n )", "def convert_timezone(time):\n if time is None:\n return None\n from_zone = tz.tzutc()\n to_zone = tz.tzlocal()\n try:\n utc = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except:\n utc = datetime.strptime(time, \"%Y-%m-%d\")\n utc = utc.replace(tzinfo=from_zone)\n local_date = utc.astimezone(to_zone).date()\n return local_date", "def convert_timezone(time):\r\n if time is None:\r\n return None\r\n from_zone = tz.tzutc()\r\n to_zone = tz.tzlocal()\r\n try:\r\n utc = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\r\n except ValueError:\r\n try:\r\n utc = datetime.strptime(time, \"%Y-%m-%d\")\r\n except ValueError:\r\n return None\r\n\r\n if utc.year < 1900 or utc.year > 9999:\r\n return None\r\n\r\n utc = utc.replace(tzinfo=from_zone)\r\n local_date = utc.astimezone(to_zone).date()\r\n return local_date", "def convert_to_timezone_with_offset(date_val: datetime, tz: str, converted: bool = False, isoformat: bool = True,\n no_offset: bool = True):\n try:\n if not isinstance(date_val, datetime):\n date_val = datetime.strptime(date_val, '%Y-%m-%dT%H:%M:%S.%f%z')\n if not isinstance(date_val, datetime) and type(date_val) is not str:\n date_val = datetime.combine(date_val, datetime.min.time())\n\n if converted:\n res = date_val.replace(tzinfo=pytz.timezone(tz))\n else:\n res = date_val.astimezone(pytz.timezone(tz))\n\n if isoformat:\n if no_offset:\n res = res.replace(tzinfo=None)\n return res.isoformat()\n return res\n except Exception as e:\n print('%s: (%s)' % (type(e), e))", "def tz_aware(value: datetime) -> datetime:\n if settings.USE_TZ:\n value = value.replace(tzinfo=timezone.utc)\n\n return value", "def as_utc_datetime(timespec):\n try:\n dt = as_datetime(timespec, tz=REF_TZ)\n utc_dt = dt.astimezone(pytz.UTC)\n return utc_dt\n except Exception:\n raise DatetimeCoercionFailure(timespec=timespec, timezone=pytz.UTC)", "def may_the_timezone_be_with_it(self):\n self.timestamp = pytz.utc.localize(self.timestamp)", "def local_to_utc(local_dt):\n local_dt = local_dt.replace(tzinfo=tz.tzlocal())\n return local_dt.astimezone(tz.tzlocal())", "def deserialize_datetime_utc(date: str):\n try:\n date_dsrlz = datetime.strptime(date, '%a, %d %b %Y %I:%M:%S %Z').astimezone(timezone(timedelta(0, 0)))\n except Exception:\n try:\n date_dsrlz = datetime.strptime(date, '%a, 
%d %b %Y %I:%M:%S %Z%z')\n except Exception:\n print(\"Invalid format\")\n return None\n\n return date_dsrlz", "def to_utc(dt):\n time_tuple = time.gmtime(time.mktime(dt.timetuple()))\n return datetime.datetime(*time_tuple[0:6])", "def _convertTZ(self):\n tz = timezone.get_current_timezone()\n dtstart = self['DTSTART']\n dtend = self['DTEND']\n if dtstart.zone() == \"UTC\":\n dtstart.dt = dtstart.dt.astimezone(tz)\n if dtend.zone() == \"UTC\":\n dtend.dt = dtend.dt.astimezone(tz)", "def local_to_utc(local_dt: datetime) -> datetime:\n if local_dt is None:\n return None\n utc_dt = local_dt.astimezone(tz.UTC)\n return utc_dt", "def test_time_to_commute_retrieved_from_google_api_in_posix_is_converted_to_utc(self):\n result = calculate_time_of_commute(\n origin_name='Gatwick Airport',\n destination_name='Kings Cross St Pancras',\n )\n assert type(result) == datetime\n assert result.tzinfo is None # Assert it is a naive datetime", "def tz_to_tz(dt, source_tz, dest_tz):\n dt = dt.replace(tzinfo=tz.gettz(source_tz))\n return dt.replace(tzinfo=tz.gettz(dest_tz))", "def convert_utc_timestamp_in_timezone(utc_timestamp, timezone=\"GMT\"):\n from_zone = tz.gettz('UTC')\n to_zone = tz.gettz(timezone)\n utc = utc_timestamp.replace(tzinfo=from_zone)\n return utc.astimezone(to_zone)", "def test_utc_in_timez(monkeypatch):\n utcoffset8_local_time_in_naive_utc = (\n datetime.datetime(\n year=2020,\n month=1,\n day=1,\n hour=1,\n minute=23,\n second=45,\n tzinfo=datetime.timezone(datetime.timedelta(hours=8)),\n )\n .astimezone(datetime.timezone.utc)\n .replace(tzinfo=None)\n )\n\n class mock_datetime:\n @classmethod\n def utcnow(cls):\n return utcoffset8_local_time_in_naive_utc\n\n monkeypatch.setattr('datetime.datetime', mock_datetime)\n rfc3339_utc_time = str(cherrypy._cplogging.LazyRfc3339UtcTime())\n expected_time = '2019-12-31T17:23:45Z'\n assert rfc3339_utc_time == expected_time", "def _parse_datetime(value):\n return parse(value).replace(tzinfo=pytz.utc) if value else None", "def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)", "def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)", "def tz_convert(self, tz) -> Self:\n tz = timezones.maybe_get_tz(tz)\n\n if self.tz is None:\n # tz naive, use tz_localize\n raise TypeError(\n \"Cannot convert tz-naive timestamps, use tz_localize to localize\"\n )\n\n # No conversion since timestamps are all UTC to begin with\n dtype = tz_to_dtype(tz, unit=self.unit)\n return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)", "def localToUTC(t, local_tz):\n t_local = local_tz.localize(t, is_dst=None)\n t_utc = t_local.astimezone(pytz.utc)\n return t_utc", "def serialize_datetime_utc(date: datetime):\n if not date.tzinfo:\n date = date.astimezone(timezone(timedelta(0, 0)))\n\n if date.tzinfo.utcoffset(date).seconds == 0:\n return date.strftime(\"%a, %d %b %Y %I:%M:%S %Z\")\n else:\n return date.strftime(\"%a, %d %b %Y %I:%M:%S %Z%z\")", "def getUTCDate(dateString, logger=default_logger):\n\n if(dateString):\n date = dateutil.parser.parse(dateString)\n date = date.astimezone(dateutil.tz.tzutc())\n return date.strftime('%Y-%m-%dT%H:%M:%SZ')\n return dateString", "def as_timezone(dt, tz):\n return dt.astimezone(timezonize(tz))", "def timestamp_aware(dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=tz.tzlocal()) # new object\n return dt", "def cast_date(*, value: str, params: str, shared: dict) -> datetime:\n param_key, _, param_val = params.partition('=')\n\n date = parse_datestr(value)\n tz = param_val if 
param_key == 'TZID' else shared['tz'] or 'UTC'\n\n return default_tzinfo(date, gettz(tz))", "def get_zone_aware_datetime(date):\n return datetime.combine(date, datetime.max.time(), pytz.UTC) if date else None", "async def test_process_datetime_to_timestamp_mirrors_utc_isoformat_behavior(\n time_zone, hass: HomeAssistant\n) -> None:\n hass.config.set_time_zone(time_zone)\n datetime_with_tzinfo = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC)\n datetime_without_tzinfo = datetime(2016, 7, 9, 11, 0, 0)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n nst = dt_util.get_time_zone(\"Canada/Newfoundland\")\n datetime_nst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=nst)\n hst = dt_util.get_time_zone(\"US/Hawaii\")\n datetime_hst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=hst)\n\n assert (\n process_datetime_to_timestamp(datetime_with_tzinfo)\n == dt_util.parse_datetime(\"2016-07-09T11:00:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_without_tzinfo)\n == dt_util.parse_datetime(\"2016-07-09T11:00:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_est_timezone)\n == dt_util.parse_datetime(\"2016-07-09T15:00:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_nst_timezone)\n == dt_util.parse_datetime(\"2016-07-09T13:30:00+00:00\").timestamp()\n )\n assert (\n process_datetime_to_timestamp(datetime_hst_timezone)\n == dt_util.parse_datetime(\"2016-07-09T21:00:00+00:00\").timestamp()\n )", "def tz_localize(\n self,\n tz,\n ambiguous: TimeAmbiguous = \"raise\",\n nonexistent: TimeNonexistent = \"raise\",\n ) -> Self:\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n if self.tz is not None:\n if tz is None:\n new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)\n else:\n raise TypeError(\"Already tz-aware, use tz_convert to convert.\")\n else:\n tz = timezones.maybe_get_tz(tz)\n # Convert to UTC\n\n new_dates = tzconversion.tz_localize_to_utc(\n self.asi8,\n tz,\n ambiguous=ambiguous,\n nonexistent=nonexistent,\n creso=self._creso,\n )\n new_dates_dt64 = new_dates.view(f\"M8[{self.unit}]\")\n dtype = tz_to_dtype(tz, unit=self.unit)\n\n freq = None\n if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])):\n # we can preserve freq\n # TODO: Also for fixed-offsets\n freq = self.freq\n elif tz is None and self.tz is None:\n # no-op\n freq = self.freq\n return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq)", "def utc():\n return date_from_utc(dt.utcnow())", "def dt(*args, **kwargs):\n \n if 'tz' in kwargs:\n tzinfo = kwargs.pop('tz')\n else:\n tzinfo = kwargs.pop('tzinfo', None)\n \n offset_s = kwargs.pop('offset_s', None) \n trustme = kwargs.pop('trustme', False)\n \n if kwargs:\n raise Exception('Unhandled arg: \"{}\".'.format(kwargs))\n \n if (tzinfo is None):\n # Force UTC if None\n timezone = timezonize('UTC')\n \n else:\n timezone = timezonize(tzinfo)\n \n if offset_s:\n # Special case for the offset\n from dateutil.tz import tzoffset\n if not tzoffset:\n raise Exception('For ISO date with 
offset please install dateutil')\n time_dt = datetime.datetime(*args, tzinfo=tzoffset(None, offset_s))\n else:\n # Standard timezone\n time_dt = timezone.localize(datetime.datetime(*args))\n\n # Check consistency \n if not trustme and timezone != pytz.UTC:\n if not check_dt_consistency(time_dt):\n raise ValueError('Sorry, time {} does not exists on timezone {}'.format(time_dt, timezone))\n\n return time_dt", "def addTZCleanup(testCase):\n tzIn = environ.get(\"TZ\", None)\n\n @testCase.addCleanup\n def resetTZ():\n setTZ(tzIn)", "def datetime_utc_to_local(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not datetime.timezone.utc:\n raise ValueError(f\"{dt} must be in UTC timezone: timezone = {dt.tzinfo}\")\n\n return dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)", "def test_freeze_with_timezone_aware_datetime_in_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None", "def user_timezone(dt, user):\n\ttz = get_config('TIME_ZONE', 'UTC')\n\tif user.is_authenticated():\n\t\tif user.profile.timezone:\n\t\t\ttz = user.profile.timezone\n\ttry:\n\t\tresult = dt.astimezone(pytz.timezone(tz))\n\texcept ValueError:\n\t\t# The datetime was stored without timezone info, so use the\n\t\t# timezone configured in settings.\n\t\tresult = dt.replace(tzinfo=pytz.timezone(get_config('TIME_ZONE', 'UTC'))) \\\n\t\t\t.astimezone(pytz.timezone(tz))\n\treturn result", "def convert_offset(\n datetime_obj: Optional[datetime], tz_offset: str = None, tz_name: str = None\n) -> Optional[datetime]:\n if not datetime_obj or not tz_offset:\n return datetime_obj\n\n try:\n offset = parse_offset(tz_offset, tz_name)\n return datetime_obj.replace(tzinfo=offset)\n except (AttributeError, TypeError, ValueError) as e:\n logger.debug(f'Could not parse offset: {tz_offset}: {str(e)}')\n return None", "def add_timezone(dt: datetime, tz_info: tzinfo = UTC) -> datetime:\n if dt.tzinfo is not None:\n raise ValueError(f\"{dt} is already tz-aware\")\n return dt.replace(tzinfo=tz_info)", "def to_nztimezone(t):\n from dateutil import tz\n utctz = tz.gettz('UTC')\n nztz = tz.gettz('Pacific/Auckland')\n return [ti.replace(tzinfo=utctz).astimezone(nztz) for ti in pd.to_datetime(t)]", "def default_tzinfo(dt, tzinfo):\n if dt.tzinfo is not None:\n return dt\n else:\n return dt.replace(tzinfo=tzinfo)", "def _normalize_date(date):\n if not date:\n return None\n\n if date.tzinfo:\n date = date.replace(tzinfo=None) - date.tzinfo.utcoffset(date)\n return min(date, datetime.utcnow())", "def adjust_datetime_to_timezone(value, from_tz, to_tz=None):\r\n if to_tz is None:\r\n to_tz = settings.TIME_ZONE\r\n if value.tzinfo is None:\r\n if not hasattr(from_tz, \"localize\"):\r\n from_tz = pytz.timezone(smart_str(from_tz))\r\n value = from_tz.localize(value)\r\n return value.astimezone(pytz.timezone(smart_str(to_tz)))", "def convert_to_localtime(dt):\n tz = pytz.timezone('Europe/Stockholm')\n dt = dt.replace(tzinfo=pytz.utc)\n dt = dt.astimezone(tz)\n return dt", "def tz(self, tz):\n return timezones.maybe_get_tz('dateutil/' + tz)", "def parse_utc_datetime(dt_str: str) -> datetime:\n if dt_str is None:\n return None\n\n db_datetime_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n dt_utc = datetime.strptime(dt_str, db_datetime_format)\n dt_utc = dt_utc.replace(tzinfo=timezone.utc)\n return dt_utc", "def convert_date(self, date):\n try:\n date_time_obj = datetime.strptime(date, self.date_format)\n timezone = pytz.timezone(\"Europe/Bucharest\")\n 
timezone.localize(date_time_obj)\n except ValueError:\n\n return False\n\n else:\n\n return date_time_obj", "def remove_time_zone(self, time_stamp):\n return iso8601.parse_date(time_stamp).replace(tzinfo=None).isoformat()", "def local_to_utc(local: pd.Series, tz: Iterable, **kwargs: Any) -> pd.Series:\n return local.groupby(tz).transform(\n lambda x: x.dt.tz_localize(\n datetime.timezone(datetime.timedelta(hours=x.name))\n if isinstance(x.name, (int, float))\n else x.name,\n **kwargs,\n ).dt.tz_convert(None)\n )", "def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:\n if tz is None:\n tz = inferred_tz\n elif inferred_tz is None:\n pass\n elif not timezones.tz_compare(tz, inferred_tz):\n raise TypeError(\n f\"data is already tz-aware {inferred_tz}, unable to \"\n f\"set specified tz: {tz}\"\n )\n return tz", "def tz_aware(dt: datetime, default: tzinfo = tzutc()) -> datetime:\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=default)\n return dt", "def make_tz_aware(time_string):\n naive_dt = datetime.datetime.strptime(time_string.strip(), '%m/%d/%Y')\n aware_dt = pytz.timezone('Asia/Manila').localize(naive_dt)\n return aware_dt.astimezone(pytz.UTC)", "def utc_datetime(dt):\n return dt + datetime.timedelta(seconds=utc_offset)", "def from_iso(date_string: str, tz_info: tzinfo = UTC) -> datetime:\n date_string = date_string.replace(\"Z\", \"+00:00\")\n dt = datetime.fromisoformat(date_string)\n try:\n return add_timezone(dt, tz_info)\n except ValueError:\n return convert_timezone(dt, tz_info)", "def date_to_local(self, date):\n if date.tzinfo is not None:\n # date is timezone-aware\n date = date.astimezone(self.tz_local)\n\n else:\n # date is a naive date: assume expressed in UTC\n date = date.replace(tzinfo=self.tz_utc)\n # and converted to local time\n date = date.astimezone(self.tz_local)\n\n return date", "def set_timezone(self, to_tz):\n self.startdate = to_tz.localize(self.startdate.replace(tzinfo=None))\n self.enddate = to_tz.localize(self.enddate.replace(tzinfo=None))\n self.timezone = to_tz", "def convertUtcStrToToontownTime(self, dateStr):\n curDateTime = self.getCurServerDateTime()\n try:\n # we changed implementation since time.mktime is giving a incorrect result in published build \n timeTuple = time.strptime(dateStr, self.formatStr)\n utcDateTime = datetime(timeTuple[0], timeTuple[1], timeTuple[2],\n timeTuple[3], timeTuple[4], timeTuple[5],\n timeTuple[6], pytz.utc)\n curDateTime = utcDateTime.astimezone(self.serverTimeZone)\n curDateTime= self.serverTimeZone.normalize(curDateTime)\n except:\n self.notify.warning(\"error parsing date string=%s\" % dateStr)\n pass\n result= curDateTime\n return result", "def convert_timezones(self, to_tzinfo):\n\n # If to_tzinfo is a pytz timezone, then use the normalize\n # method so pytz can do normalize DST transition data\n if isinstance(to_tzinfo, pytz.tzinfo.BaseTzInfo):\n self.dates = np.array(\n [to_tzinfo.normalize(date.astimezone(to_tzinfo))\n for date in self.dates])\n\n # If to_tzinfo is a regular tz_info instance, then just call\n # astimezone\n else:\n self.dates = np.array([date.astimezone(to_tzinfo)\n for date in self.dates])" ]
[ "0.75894195", "0.7289164", "0.7209024", "0.6907422", "0.6864215", "0.6698652", "0.6688846", "0.6686423", "0.6653843", "0.6593803", "0.6576157", "0.6573529", "0.6562767", "0.6553227", "0.65446633", "0.6526314", "0.64945215", "0.64906996", "0.6474339", "0.64524305", "0.6386153", "0.6386153", "0.6386153", "0.63603014", "0.63603014", "0.6357126", "0.63338846", "0.6318626", "0.6317121", "0.6306093", "0.62575567", "0.6245096", "0.62415665", "0.62194324", "0.6209132", "0.6207914", "0.6206477", "0.6181826", "0.6175669", "0.6173003", "0.61555606", "0.6137515", "0.61372364", "0.6135709", "0.61294377", "0.6122999", "0.6118925", "0.61187375", "0.6101603", "0.6099666", "0.6092408", "0.6087563", "0.60743004", "0.6072651", "0.6053631", "0.6050643", "0.60321486", "0.6020981", "0.5977413", "0.5923392", "0.5915318", "0.5893004", "0.5893004", "0.58881396", "0.5886131", "0.58733773", "0.58440894", "0.5837923", "0.58336425", "0.58335817", "0.5824534", "0.58118695", "0.58099174", "0.57742953", "0.5771016", "0.5763496", "0.576319", "0.57627624", "0.57551354", "0.57436323", "0.57388324", "0.5734194", "0.57172406", "0.571459", "0.5714527", "0.570453", "0.57019657", "0.5699342", "0.566498", "0.5630133", "0.5625097", "0.5619553", "0.5618174", "0.56167775", "0.558563", "0.5582333", "0.55746037", "0.5566998", "0.555201", "0.5549714" ]
0.76795125
0
Processing function used to reassemble corrected MIME header lines back into a block of text at the beginning of an email message.
def _merge_broken_header_lines(accumulator, item):
    cleaned_item = item.strip()
    for header in _header_list:
        if item.startswith(header):
            accumulator.append(cleaned_item)
            return accumulator
    try:
        accumulator[len(accumulator)-1] = accumulator[len(accumulator)-1] + ' ' + cleaned_item
    except IndexError:
        # edge case where the first line doesn't start with a header
        accumulator.append(cleaned_item)
    return accumulator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_broken_hotmail_headers(text):\n end_of_header_match = _end_of_simple_header_pattern.search(text)\n temp_header_text = text[:end_of_header_match.end()].strip()\n lines = temp_header_text.splitlines()[1:] # first line is not a header...\n fixed_header_lines = reduce(_merge_broken_header_lines, lines, [])\n return_text = os.linesep.join(fixed_header_lines) + text[end_of_header_match.end():]\n return return_text", "def process_message(message):\r\n message = gensim.utils.to_unicode(message, 'latin1').strip()\r\n blocks = message.split(u'\\n\\n')\r\n # skip email headers (first block) and footer (last block)\r\n content = u'\\n\\n'.join(blocks[1:])\r\n return content", "def process(self) -> None:\n self.parsed = email.message_from_bytes(self.rawmailcontent, policy=email.policy.EmailPolicy()) # type: email.message.EmailMessage\n\n self.subject = self.parsed[\"subject\"]\n\n if self.parsed[\"X-Jicket-Initial-ReplyID\"] is not None and self.parsed[\"X-Jicket-Initial-ReplyID\"] == self.parsed[\"In-Reply-To\"]:\n self.threadstarter = True\n elif self.config.ticketAddress in self.parsed[\"From\"]: # Take more heuristic approach\n self.threadstarter = True\n\n self.rawmailcontent = None # No need to store after processing\n\n self.get_text_bodies(self.parsed)\n self.textfrombodies()", "def _header_transformer(self, lines):\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line", "def fix_broken_yahoo_headers(text):\n end_of_header_match = _end_of_multipart_header_pattern.search(text)\n temp_header_text = text[:end_of_header_match.end()].strip()\n lines = temp_header_text.splitlines()\n fixed_header_lines = reduce(_merge_broken_header_lines, lines, [])\n return_text = os.linesep.join(fixed_header_lines) + '\\r\\n\\r\\n' + text[end_of_header_match.end():]\n return return_text", "def prepend_header(rendered_header):\n debug(\"adding header\")\n _range = CURRENT_BUFFER.range(0, 0)\n _range.append(rendered_header.split(\"\\n\"))", "def _preprocess(self, body):\n return body", "def transform(self, email_path):\n mail = open(email_path, 'r')\n content = mail.read(self.max_read_len)\n i = 0\n while not(content[i] == '\\n' and content[i + 1] == '\\n') and i < len(content) - self.ngram:\n i += 1\n header = content[:i]\n # TODO find a smarter way deal with the header-body problem\n body = content[i + 2:]\n if len(body) + len(header) > self.max_read_len:\n body = body[:max(1000, self.max_read_len - len(header))]\n header_set = self.tokenize(header)\n body_set = self.tokenize(body)\n mail.close()\n return (header_set, body_set)", "def _format_message(start_line, header, body):\n # The message begins with the start line, terminated with EOL and encoded.\n msg = (start_line + EOL).encode(HEADER_ENCODING)\n # Convert the header to lines.\n header_lines = _format_header(header)\n # Add them to the message, one by one, each terminated with EOL and encoded.\n for line in header_lines:\n msg = msg + (line + EOL).encode(HEADER_ENCODING)\n # A blank line indicates end of headers.\n msg = msg + EOL.encode(HEADER_ENCODING)\n # The rest of the message is the body.\n msg = msg + body\n return msg", "def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' 
% i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')", "def preprocess(sent):\n return sent", "def prepare_msg(raw_message):\n\n raw_message = str(raw_message)\n\n raw_message = raw_message.lower()\n raw_message = raw_message.replace(\"bismarkb1996\", \"\")\n raw_message = raw_message.replace(\"id336383265\", \"\")\n raw_message = re.sub('[^а-яА-Яa-zA-Z0-9\\\\s\\\\-]+', '', raw_message)\n\n split_message = raw_message.split(\" \")\n logger.debug(\"Split message: \" + str(split_message))\n\n message = []\n for msg in [x.split(\"-\") for x in split_message]:\n for i in msg:\n if i != \"\":\n message.append(i)\n\n return message", "def process_header(line, new_ids=None):\n # extraneous headers\n if line.startswith('##') and not any(\n line.startswith('##' + header_type)\n for header_type in ('INFO', 'FILTER', 'FORMAT', 'ALT', 'contig')\n ):\n return None\n\n # non-whitelisted annotations\n match = re.match(r'##INFO=<ID=([^,]+)', line)\n if match:\n info_name = match.group(1)\n if info_name not in WHITELISTED_ANNOTATIONS:\n return None\n\n if line.startswith('#CHROM') and new_ids is not None:\n fields = line.strip().split('\\t')[:9] # fixed headers\n fields.extend(new_ids)\n line = '\\t'.join(fields) + '\\n'\n\n return line", "def decode_message_header(self):\n data_iter = PreviewIterator(self._mms_data)\n\n # First 3 headers (in order\n ############################\n # - X-Mms-Message-Type\n # - X-Mms-Transaction-ID\n # - X-Mms-Version\n # TODO: reimplement strictness - currently we allow these 3 headers\n # to be mixed with any of the other headers (this allows the\n # decoding of \"broken\" MMSs, but is technically incorrect)\n\n # Misc headers\n ##############\n # The next few headers will not be in a specific order, except for\n # \"Content-Type\", which should be the last header\n # According to [4], MMS header field names will be short integers\n content_type_found = False\n header = ''\n while content_type_found == False:\n try:\n header, value = self.decode_header(data_iter)\n except StopIteration:\n break\n\n if header == mms_field_names[0x04][0]:\n content_type_found = True\n else:\n self._mms_message.headers[header] = value\n\n if header == 'Content-Type':\n # Otherwise it might break Content-Location\n # content_type, params = value\n self._mms_message.headers[header] = value\n\n return data_iter", "def preprocess(self, message):\n self._call_extension_method('preprocess', message)", "def parse_header(self):\n bodystart=re.compile(r\"<body>\", re.IGNORECASE).search(self.lines).span()[1]\n oldheader=self.lines[0:bodystart]\n start=re.compile(\"<title>\", re.IGNORECASE).search(oldheader).span()[1]\n finish=re.compile(\"</title>\", re.IGNORECASE).search(oldheader).span()[0]\n titles=oldheader[start:finish].split(\"--\")\n # Duplicate if needed\n if len(titles)==1: titles.append(titles[0])\n self.title, self.header= {}, {}\n for i, lang in enumerate(LANGS):\n self.title[lang]=titles[i]\n self.header[lang]=\"%s%s%s\" % (oldheader[:start], self.title[lang], 
oldheader[finish:],)", "def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)", "def header(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC + \"0m \" +self.A220 + self.A220 + self.A220 + self.A220 + self.A220 +\" \" + self.ESC + \"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A220+self.A220+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A220+self.A220+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mS\"+self.ESC+\"0;31mAGA\"+self.ESC+\"37m \"+self.A219+self.A219+self.ESC+\"30;47mo\"+self.ESC+\"37;40m\"+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mO\"+self.ESC+\"0;31mF THE\"+self.ESC+\"37m \"+self.A219+self.A219+self.ESC+\"30;47mO\"+self.ESC+\"37;40m\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.A220+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m 
\"+self.ESC+\"31mR\"+self.ESC+\"0;31mED\"+self.ESC+\"37m \"+self.A219+self.ESC+\"30;47mo\"+self.ESC+\"37;40m\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.A223+self.A219+self.ESC+\"1;47m\"+self.A176+self.A219+self.A219+self.A219+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m \"+self.ESC+\"1mD\"+self.ESC+\"0;31mRAGON 0.9.9\"+self.ESC+\"37m \"+self.A223+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.A219+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"0m \"+self.A219+self.ESC+\"1;47m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m concept\"+self.ESC+\"37m \"+self.A223+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"0m \"+self.ESC+\"1m\"+self.A220+self.ESC+\"0m \"+self.ESC+\"1m\"+self.A220+self.ESC+\"0m \"+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"1;41m\"+self.A176+self.A178+\" \"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m Seth Robinson \"+self.ESC+\"37m\"+self.A222+\" \"+self.A223+self.A223+self.ESC+\"1;47m\"+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A223+self.A223+self.ESC+\"0m \"+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+self.A223+\" \"+self.ESC+\"0m\"+self.A223+self.A219+self.A220+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m by\"+self.ESC+\"0m \"+self.A219+\" \"+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A220+self.A220+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0m \"+self.A223+self.ESC+\"1;47m\"+self.A176+self.A219+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A178+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A176+\" \"+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1;34m J\"+self.ESC+\"0;34m.\"+self.ESC+\"1mT\"+self.ESC+\"0;34m.\"+self.ESC+\"1mS\"+self.ESC+\"0;34mage\"+self.ESC+\"0m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.A220+self.ESC+\"1;47m\"+self.A177+self.A176+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A220+self.A220+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0m \"+self.A223+self.ESC+\"1;47m\"+self.A177+self.A219+self.A219+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A176+self.A178+\" 
\"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"1;41m\"+self.A176+self.A178+self.A176+self.A176+self.A177+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.A219+\" \"+self.ESC+\"1;47m\"+self.A176+self.A177+self.A219+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"41m\"+self.A223+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+\" \"+self.A220+self.A220+self.A220+self.A220+self.A223+self.A220+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A177+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A178+\" \"+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A178+self.A178+self.A176+self.A177+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.A219+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+\" \"+self.A176+self.A176+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A178+self.A178+self.A219+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.A176+self.A177+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+self.ESC+\"37m \"+self.ESC+\"31m\"+self.A178+self.A177+self.A177+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A177+self.A178+self.A219+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A219+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"1;41m\"+self.A178+\" \"+self.A177+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A178+self.A177+self.A219+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.A223+self.ESC+\"1;41m\"+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A178+self.A178+self.A177+self.A176+self.A223+\" \"+self.A220+self.A220+\" \"+self.A223+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m 
\"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A221+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A223+self.A223+self.A223+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A178+self.A177+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A177+self.A219+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.A178+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.A223+self.A223+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A178+self.A178+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.A223+self.A178+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+\" \"+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.ESC+\"1;30;47m\"+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.A219+self.ESC+\"1;41m\"+self.A176+self.A176+self.ESC+\"0;31m\"+self.A221+\" \"+self.ESC+\"1;5;32m\"+self.A220+self.A220+self.A223+\" \"+self.ESC+\"0;31;41m \"+self.ESC+\"40m\"+self.A178+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A220+self.A220+self.A220+self.A220+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.A177+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;5;32m\"+self.A219+self.A219+self.A223+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.A219+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A178+self.A223+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A219+self.A178+self.ESC+\"0;37;40m 
\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"1;5;32m\"+self.A223+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.A219+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A223+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A177+self.A219+self.A178+self.ESC+\"37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+self.ESC+\"0;31m\"+self.A221+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+\" \"+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+self.A219+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A176+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A177+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.A221+\" \"+self.ESC+\"31;41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A177+self.A177+self.A176+self.A178+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A220+self.A220+\" \"+self.ESC+\"1;41m\"+self.A178+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0m\"+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A178+self.A178+self.A177+self.A176+self.A176+self.A177+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.A178+\" \"+self.ESC+\"1;41m\"+self.A177+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A176+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A223+self.ESC+\"1m\"+self.A219+self.A220+\" \"+self.ESC+\"0;31;41m 
\"+self.A177+self.A178+self.A176+self.A176+\" \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A176+self.A178+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A223+self.ESC+\"1;47m\"+self.A223+self.ESC+\"40m\"+self.A219+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A178+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;37;47m\"+self.A222+self.ESC+\"40m\"+self.A221+self.A223+self.A220+\" \"+self.ESC+\"0;31m\"+self.A177+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A221+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+\" \"+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A223+self.ESC+\"31m\"+self.A222+self.ESC+\"1;41m\"+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;37;47m\"+self.A222+self.ESC+\"40m\"+self.A221+\" \"+self.ESC+\"47m\"+self.A222+self.ESC+\"40m\"+self.A221+\" \"+self.ESC+\"0;31m\"+self.A178+self.A177+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"33m\"+self.A220+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A220+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+\" \"+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"31;41m \"+self.A176+self.A176+self.ESC+\"37;40m \"+self.A220+self.ESC+\"1m\"+self.A219+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A219+self.A221+\" \"+self.A223+\" \"+self.A223+self.A220+\" \"+self.ESC+\"0;31;41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A178+self.A177+self.A176+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A220+self.ESC+\"1m\"+self.A219+\" \"+self.A219+self.A221+\" \"+self.A223+\" \"+self.A220+\" \"+self.A223+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"1;33;43m\"+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m 
\"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A219+self.A220+self.A222+self.ESC+\"1;41m\"+self.A219+self.A177+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.A178+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A223+self.ESC+\"1m\"+self.A220+\" \"+self.A223+\" \"+self.A220+self.A223+self.A220+\" \"+self.A223+\" \"+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"1;33m\"+self.A223+self.ESC+\"43m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0;33m\"+self.A220+self.A219+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A177+self.A178+\" \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+self.A178+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"30mÙ\"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A223+self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.A223+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+\" \"+self.A219+\" \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"31m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.A176+self.A176+\" \"+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.ESC+\"1;33;43m\"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A177+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.ESC+\"1;33;43m\"+self.A219+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.A220+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" 
\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.A220+self.A220+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.A223+\" \"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A177+\" \"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A178+self.ESC+\"0;33m\"+self.A220+self.A219+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A177+self.A177+self.A176+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A223+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A178+\" \"+self.A177+\" \"+self.A176+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A219+self.A178+self.A178+self.A177+self.A177+self.A176+\" \"+self.A176+\" \"+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"0;37m \"+self.ESC+\"1;33m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+\" \"+self.ESC+\"30;41m \"+self.ESC+\"1;31mShatterstar [W/X] \"+self.ESC+\"0;37;40m \"+self.ESC+\"30;41m \"+self.ESC+\"37;40m \"+self.ESC+\"30;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg", "def convert_headers(mkd):\n\t\n\tfor md_code in re.findall(r\"^#####[^#].*\", mkd, re.M):\n\t\ttex_code = \"\\subparagraph{\" + 
re.findall(r\"#####(.*)\", md_code, re.M)[0] + \"}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\tfor md_code in re.findall(r\"^####[^#].*\", mkd, re.M):\n\t\ttex_code = \"\\paragraph{\" + re.findall(r\"####(.*)\", md_code, re.M)[0] + \"}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\tfor md_code in re.findall(r\"^###[^#].*\", mkd, re.M):\n\t\ttex_code = \"\\subsubsection{\" + re.findall(r\"###(.*)\", md_code, re.M)[0] + \"}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\tfor md_code in re.findall(r\"^##[^#].*\", mkd, re.M):\n\t\ttex_code = \"\\subsection{\" + re.findall(r\"##(.*)\", md_code, re.M)[0] + \"}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\tfor md_code in re.findall(r\"^#[^#].*\", mkd, re.M):\n\t\ttex_code = \"\\section{\" + re.findall(r\"#(.*)\", md_code, re.M)[0] + \"}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\treturn mkd", "def parse_header(self):", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def remove_header_subject(text):\n _before, _blankline, after = text.partition('\\n\\n')\n sub = [l for l in _before.split(\"\\n\") if \"Subject:\" in l]\n final = sub[0] + \"\\n\" + after\n return final", "def _parse_message(msg):\n lines, body = _split_lines(msg)\n # The first line is the start line.\n start_line = lines[0]\n # Remaining lines are the header.\n header = _parse_header(lines[1 :])\n return start_line, header, body", "def _preprocess_sgm(line, is_sgm):\n pass", "def process(self,line):\n\n pattern = re.compile(\"@.*?@\")\n matches = pattern.findall(line)\n for m in matches:\n replacement = r\"<small>{}</small>\".format(re.escape(m[1:-1]))\n line = pattern.sub(replacement,line)\n\n return line", "def _split_lines(msg):\n # Find the EOL sequence encoded as bytes.\n eol = EOL.encode(HEADER_ENCODING)\n\n # The list into which we will collect header lines.\n lines = []\n # Keep going until we find a blank line.\n while True:\n # Look for the EOL sequence.\n index = msg.index(eol)\n # Split off the line, not including the EOL.\n line = msg[: index]\n # In the message, skip over the line, past the EOL.\n msg = msg[index + len(eol) :]\n # Is the line blank?\n if len(line) == 0:\n # Yes. We're done; return the lines and whatever data is left in\n # the message.\n return lines, msg\n else:\n # No. Decode the line.\n line = line.decode(HEADER_ENCODING)\n # Store it in the list of lines.\n lines.append(line)\n # Now continue at the top of the loop.", "def _preprocess(self, stream):\n unfinished = ' <unfinished ...>'\n resumed = '<... [^ ]+ resumed> (.*)$'\n in_progressed = {}\n\n for line in stream:\n pid, timestamp, rest = line.rstrip().split(None, 2)\n\n # Save any lines that are unfinished.\n # Line must *end* with the string unfinished.\n i = rest.rfind(unfinished)\n if i != -1 and i == len(rest) - len(unfinished):\n partial_line = rest[:i]\n in_progressed[pid] = (timestamp, partial_line)\n continue\n\n # Resume lines. 
Line must *start* with resumed string.\n match = re.search(resumed, line)\n if match:\n resumed_line = match.groups()[0]\n timestamp, partial_line = in_progressed.pop(pid)\n line = '{} {} {}{}'.format(\n pid, timestamp, partial_line, resumed_line)\n\n yield line", "def strip_gutenberg_header(input_book_lines):\n\tlines = input_book_lines\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\treturn lines[curr_line+1:]", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def inject_headers(mime, headers):\n for key in headers:\n if key == 'Bcc' or key == 'Resent-Bcc':\n continue\n del mime[key]\n mime[key] = encode_header(headers[key])", "def encode_message_header(self):\n # See [4], chapter 8 for info on how to use these\n # from_types = {'Address-present-token': 0x80,\n # 'Insert-address-token': 0x81}\n\n # content_types = {'application/vnd.wap.multipart.related': 0xb3}\n\n # Create an array of 8-bit values\n message_header = array.array('B')\n\n headers_to_encode = self._mms_message.headers\n\n # If the user added any of these to the message manually\n # (X- prefix) use those instead\n for hdr in ('X-Mms-Message-Type', 'X-Mms-Transaction-Id',\n 'X-Mms-Version'):\n if hdr in headers_to_encode:\n if hdr == 'X-Mms-Version':\n clean_header = 'MMS-Version'\n else:\n clean_header = hdr.replace('X-Mms-', '', 1)\n\n headers_to_encode[clean_header] = headers_to_encode[hdr]\n del headers_to_encode[hdr]\n\n # First 3 headers (in order), according to [4]:\n ################################################\n # - X-Mms-Message-Type\n # - X-Mms-Transaction-ID\n # - X-Mms-Version\n\n ### Start of Message-Type verification\n if 'Message-Type' not in headers_to_encode:\n # Default to 'm-retrieve-conf'; we don't need a To/CC field for\n # this (see WAP-209, section 6.3, table 5)\n headers_to_encode['Message-Type'] = 'm-retrieve-conf'\n\n # See if the chosen message type is valid, given the message's\n # other headers. 
NOTE: we only distinguish between 'm-send-req'\n # (requires a destination number) and 'm-retrieve-conf'\n # (requires no destination number) - if \"Message-Type\" is\n # something else, we assume the message creator knows\n # what she is doing\n if headers_to_encode['Message-Type'] == 'm-send-req':\n found_dest_address = False\n for address_type in ('To', 'Cc', 'Bc'):\n if address_type in headers_to_encode:\n found_dest_address = True\n break\n\n if not found_dest_address:\n headers_to_encode['Message-Type'] = 'm-retrieve-conf'\n ### End of Message-Type verification\n\n ### Start of Transaction-Id verification\n if 'Transaction-Id' not in headers_to_encode:\n trans_id = str(random.randint(1000, 9999))\n headers_to_encode['Transaction-Id'] = trans_id\n ### End of Transaction-Id verification\n\n ### Start of MMS-Version verification\n if 'MMS-Version' not in headers_to_encode:\n headers_to_encode['MMS-Version'] = '1.0'\n\n # Encode the first three headers, in correct order\n for hdr in ('Message-Type', 'Transaction-Id', 'MMS-Version'):\n message_header.extend(\n MMSEncoder.encode_header(hdr, headers_to_encode[hdr]))\n del headers_to_encode[hdr]\n\n # Encode all remaining MMS message headers, except \"Content-Type\"\n # -- this needs to be added last, according [2] and [4]\n for hdr in headers_to_encode:\n if hdr != 'Content-Type':\n message_header.extend(\n MMSEncoder.encode_header(hdr, headers_to_encode[hdr]))\n\n # Ok, now only \"Content-type\" should be left\n content_type, ct_parameters = headers_to_encode['Content-Type']\n message_header.extend(MMSEncoder.encode_mms_field_name('Content-Type'))\n ret = MMSEncoder.encode_content_type_value(content_type, ct_parameters)\n message_header.extend(flatten_list(ret))\n\n return message_header", "def preprocessingSMS(textLine):\n\treturn textLine.split(None, 1)[1]", "def preprocess (self, filecontents):\n\t\treturn filecontents", "def rfc822_escape(header):\n lines = header.split('\\n')\n sep = '\\n' + 8 * ' '\n return sep.join(lines)", "def _preprocess_file(file_name):\n raw_content = utils.run_on_main_thread(\n partial(utils.get_file_content, file_name, force_lf_endings=True))\n\n # replace all comments with spaces to not change the position\n # of the rest\n comments = [c for c in _RE_COMMENT.finditer(raw_content)]\n content = list(raw_content)\n for m in comments:\n for i in range(m.start(), m.end()):\n content[i] = ' '\n content = \"\".join(content)\n return raw_content, content", "def trans(monitext):\n result = ''\n last_line = 'empty'\n\n while monitext:\n # newline character or empty line(s)\n matched = re.match(r'\\n+', monitext, re.M)\n\n if matched:\n result += matched.group()\n if len(matched.group()) > 1:\n last_line = 'empty'\n elif last_line == 'title':\n result += '\\n'\n last_line = 'empty'\n monitext = monitext[matched.end():]\n continue\n\n # code block\n matched = re.match(r'{{{.*?\\n((\\n|.)*?)\\n}}}', monitext, re.M)\n\n if matched:\n body = matched.groups()[0]\n result += '\\n\\t' + '\\n\\t'.join(body.split('\\n'))\n monitext = monitext[matched.end():]\n last_line = 'code'\n continue\n\n # header\n matched = re.match(r'^(=+) (.+) (=+)', monitext)\n\n if matched:\n title = matched.groups()[1]\n level = len(matched.groups()[0])\n\n if last_line != 'empty':\n result += '\\n'\n\n if level < 4:\n underscore = {2 : '=', 3 : '-'}[level] * mbstrlen(title)\n result += title + os.linesep + underscore\n else:\n result += ('#' * level) + \" \" + title\n monitext = monitext[matched.end():]\n\n last_line = 'title'\n\n continue\n\n # 
link\n matched = re.match(r'(.*)\\[([^\\s]+[ \\t]+)?(.+)\\]', monitext)\n\n if matched:\n pre = matched.groups()[0]\n url = matched.groups()[1]\n if url:\n url = url.strip()\n name = matched.groups()[2]\n\n if url:\n replaced = \"%s[%s](%s)\" % (pre, name, url)\n else:\n replaced = \"%s[%s](%s)\" % (pre, name, name)\n\n monitext = monitext[:matched.start()] + replaced\\\n + monitext[matched.end():]\n\n # important\n monitext = re.sub(r'\\'\\'\\'(.*?)\\'\\'\\'', r'**\\1**', monitext)\n\n # italic\n monitext = re.sub(r'\\'\\'(.*?)\\'\\'', r'_\\1_', monitext)\n\n # list\n matched = re.match(r'^(\\s*)\\* (.*)', monitext)\n\n if matched:\n depth = len(matched.groups()[0])\n body = matched.groups()[1]\n result += (depth - 1) * '\\t' + '* ' + body\n monitext = monitext[matched.end():]\n\n last_line = 'others'\n\n try:\n # Go to the next line\n index = monitext.index('\\n')\n result += monitext[:index]\n monitext = monitext[index:]\n except ValueError:\n result += monitext\n break\n\n return result", "def pre_process_text_block(block):\n block['content'] = block['content'].strip()", "def _parseHeader(self):\n # Big or little endian for the header.\n self._getEndianess()\n # Read the fixed header.\n self._readFixedHeader()\n # Get the present blockettes.\n self._getBlockettes()\n # Calculate the starttime.\n self._calculateStarttime()", "def preprocess(self, text):\r\n return text", "def _process_content_codings(self, chunk):\n content_codings = self.parsed_headers.get('content-encoding', [])\n content_codings.reverse()\n for coding in content_codings:\n # TODO: deflate support\n if coding in ['gzip', 'x-gzip'] and self._decode_ok:\n if not self._in_gzip_body:\n self._gzip_header_buffer += chunk\n try:\n chunk = self._read_gzip_header(\n self._gzip_header_buffer\n )\n self._in_gzip_body = True\n except IndexError:\n return '' # not a full header yet\n except IOError, gzip_error:\n self.add_note('header-content-encoding',\n rs.BAD_GZIP,\n gzip_error=str(gzip_error)\n )\n self._decode_ok = False\n return\n try:\n chunk = self._gzip_processor.decompress(chunk)\n except zlib.error, zlib_error:\n self.add_note(\n 'header-content-encoding', \n rs.BAD_ZLIB,\n zlib_error=str(zlib_error),\n ok_zlib_len=f_num(self.payload_sample[-1][0]),\n chunk_sample=chunk[:20].encode('string_escape')\n )\n self._decode_ok = False\n return\n else:\n # we can't handle other codecs, so punt on body processing.\n self._decode_ok = False\n return\n self._md5_post_processor.update(chunk)\n self.decoded_len += len(chunk)\n return chunk", "def modify_header():\n\n print_debug_info()\n if not bool(int(vim.eval(\"g:BHModify\"))):\n return\n\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not has_header():\n debug(\"This file has no header.\")\n return add_header()\n\n # only if the suffix is supported and we have a method to strip the comment.\n if not ((\"extract_comment_%s\" % SUFFIX) in globals() and suffix_is_supported()):\n return\n\n comment = globals()[\"extract_comment_%s\" % SUFFIX]()\n debug(\"comment: %s\" % str(comment))\n if not comment:\n debug(\"comment is empty\")\n return\n\n comment_dict = {}\n\n if len(comment) < 3:\n # Less than 3 lines of original comment, put them in Description part.\n comment_dict['Description'] = '\\n'.join(comment)\n else:\n comment_dict = read_comment(comment)\n if \"\" in comment_dict:\n del comment_dict[\"\"]\n new_header_dict = read_comment(globals().get(\"%s_header\" % SUFFIX).rstrip().splitlines())\n debug(\"new\")\n 
debug(set(new_header_dict.keys()))\n debug(set(comment_dict.keys()))\n debug(\"end\")\n if not set(new_header_dict.keys()) == set(comment_dict.keys()):\n return prepend_header(render_header(comment_dict))\n else:\n debug(\"do not modify header since we already have the same header.\")", "def _parse_header(self, line):\n if self._regex_helper.search_compiled(W._re_header, line):\n if not self.headers:\n for value in re.findall(W._re_header, line):\n self.headers.append(value[0])\n raise ParsingDone\n else:\n # Dictionary which is going to be appended to the returned list\n ret = dict()\n # List of entries\n _entries = list()\n # List of values in WHAT entry\n _what_entry = list()\n for value in re.findall(W._re_header, line):\n _entries.append(value[0])\n for what_index in range(len(self.headers) - 1, len(_entries)):\n _what_entry.append(_entries[what_index])\n _what_entry_string = ' '.join(_what_entry)\n for index in range(len(self.headers)):\n if index < len(self.headers) - 1:\n ret.update({self.headers[index]: _entries[index]})\n else:\n ret.update({self.headers[index]: _what_entry_string})\n self.current_ret['RESULT'].append(ret)\n raise ParsingDone", "def process(mlist, msg, msgdata):\n # Digests and Mailman-craft messages should not get additional headers.\n if msgdata.get('isdigest') or msgdata.get('nodecorate'):\n return\n d = {}\n member = msgdata.get('member')\n if member is not None:\n # Calculate the extra personalization dictionary.\n recipient = msgdata.get('recipient', member.address.original_email)\n d['member'] = formataddr(\n (member.subscriber.display_name, member.subscriber.email))\n d['user_email'] = recipient\n d['user_delivered_to'] = member.address.original_email\n d['user_language'] = member.preferred_language.description\n d['user_name'] = member.display_name\n # For backward compatibility.\n d['user_address'] = recipient\n # Calculate the archiver permalink substitution variables. This provides\n # the $<archive-name>_url placeholder for every enabled archiver.\n for archiver in IListArchiverSet(mlist).archivers:\n if archiver.is_enabled:\n # Get the permalink of the message from the archiver. Watch out\n # for exceptions in the archiver plugin.\n try:\n archive_url = archiver.system_archiver.permalink(mlist, msg)\n except Exception:\n alog.exception('Exception in \"{}\" archiver'.format(\n archiver.system_archiver.name))\n archive_url = None\n if archive_url is not None:\n placeholder = '{}_url'.format(archiver.system_archiver.name)\n d[placeholder] = archive_url\n # These strings are descriptive for the log file and shouldn't be i18n'd\n d.update(msgdata.get('decoration-data', {}))\n header = decorate('list:member:regular:header', mlist, d)\n footer = decorate('list:member:regular:footer', mlist, d)\n # Escape hatch if both the footer and header are empty or None.\n if len(header) == 0 and len(footer) == 0:\n return\n # Be MIME smart here. We only attach the header and footer by\n # concatenation when the message is a non-multipart of type text/plain.\n # Otherwise, if it is not a multipart, we make it a multipart, and then we\n # add the header and footer as text/plain parts.\n #\n # BJG: In addition, only add the footer if the message's character set\n # matches the charset of the list's preferred language. 
This is a\n # suboptimal solution, and should be solved by allowing a list to have\n # multiple headers/footers, for each language the list supports.\n #\n # Also, if the list's preferred charset is us-ascii, we can always\n # safely add the header/footer to a plain text message since all\n # charsets Mailman supports are strict supersets of us-ascii --\n # no, UTF-16 emails are not supported yet.\n #\n # TK: Message with 'charset=' cause trouble. So, instead of\n # mgs.get_content_charset('us-ascii') ...\n mcset = msg.get_content_charset() or 'us-ascii'\n lcset = mlist.preferred_language.charset\n msgtype = msg.get_content_type()\n # BAW: If the charsets don't match, should we add the header and footer by\n # MIME multipart chroming the message?\n wrap = True\n if not msg.is_multipart() and msgtype == 'text/plain':\n # Save the RFC-3676 format parameters.\n format_param = msg.get_param('format')\n delsp = msg.get_param('delsp')\n # Save 'Content-Transfer-Encoding' header in case decoration fails.\n cte = msg.get('content-transfer-encoding')\n # header/footer is now in unicode.\n try:\n oldpayload = msg.get_payload(decode=True).decode(mcset)\n del msg['content-transfer-encoding']\n frontsep = endsep = ''\n if len(header) > 0 and not header.endswith('\\n'):\n frontsep = '\\n'\n if len(footer) > 0 and not oldpayload.endswith('\\n'):\n endsep = '\\n'\n payload = header + frontsep + oldpayload + endsep + footer\n # When setting the payload for the message, try various charset\n # encodings until one does not produce a UnicodeError. We'll try\n # charsets in this order: the list's charset, the message's\n # charset, then utf-8. It's okay if some of these are duplicates.\n for cset in (lcset, mcset, 'utf-8'):\n try:\n msg.set_payload(payload.encode(cset), cset)\n except UnicodeError:\n pass\n else:\n if format_param:\n msg.set_param('format', format_param)\n if delsp:\n msg.set_param('delsp', delsp)\n wrap = False\n break\n except (LookupError, UnicodeError):\n if cte:\n # Restore the original c-t-e.\n del msg['content-transfer-encoding']\n msg['Content-Transfer-Encoding'] = cte\n elif msg.get_content_type() == 'multipart/mixed':\n # The next easiest thing to do is just prepend the header and append\n # the footer as additional subparts\n payload = msg.get_payload()\n if not isinstance(payload, list):\n payload = [payload]\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n msg.set_payload(payload)\n wrap = False\n # If we couldn't add the header or footer in a less intrusive way, we can\n # at least do it by MIME encapsulation. We want to keep as much of the\n # outer chrome as possible.\n if not wrap:\n return\n # Because of the way Message objects are passed around to process(), we\n # need to play tricks with the outer message -- i.e. the outer one must\n # remain the same instance. So we're going to create a clone of the outer\n # message, with all the header chrome intact, then copy the payload to it.\n # This will give us a clone of the original message, and it will form the\n # basis of the interior, wrapped Message.\n inner = Message()\n # Which headers to copy? 
Let's just do the Content-* headers\n for h, v in msg.items():\n if h.lower().startswith('content-'):\n inner[h] = v\n inner.set_payload(msg.get_payload())\n # For completeness\n inner.set_unixfrom(msg.get_unixfrom())\n inner.preamble = msg.preamble\n inner.epilogue = msg.epilogue\n # Don't copy get_charset, as this might be None, even if\n # get_content_charset isn't. However, do make sure there is a default\n # content-type, even if the original message was not MIME.\n inner.set_default_type(msg.get_default_type())\n # BAW: HACK ALERT.\n if hasattr(msg, '__version__'):\n inner.__version__ = msg.__version__\n # Now, play games with the outer message to make it contain three\n # subparts: the header (if any), the wrapped message, and the footer (if\n # any).\n payload = [inner]\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n msg.set_payload(payload)\n del msg['content-type']\n del msg['content-transfer-encoding']\n del msg['content-disposition']\n msg['Content-Type'] = 'multipart/mixed'", "def __init__(self, content):\n\t\tself.raw = content\n\t\tself.mail = email.message_from_string(self.raw)\n\n\t\tself.text_content = ''\n\t\tself.html_content = ''\n\t\tself.attachments = []\n\t\tself.cid_map = {}\n\t\tself.parse()\n\t\tself.set_content_and_type()\n\t\tself.set_subject()\n\t\tself.set_from()\n\t\tself.message_id = self.mail.get('Message-ID')\n\n\n\t\tself.unique_id = get_unique_id(self.mail)\n\n\t\t# gmail mailing-list compatibility\n\t\t# use X-Original-Sender if available, as gmail sometimes modifies the 'From'\n\t\t# _from_email = self.mail.get(\"X-Original-From\") or self.mail[\"From\"]\n\t\t# \n\t\t# self.from_email = extract_email_id(_from_email)\n\t\t# if self.from_email:\n\t\t# \tself.from_email = self.from_email.lower()\n\t\t# \n\t\t# #self.from_real_name = email.utils.parseaddr(_from_email)[0]\n\t\t# \n\t\t# _from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])\n\t\t# self.from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])[0][0] or \"\"\n\t\t# \n\t\t# try:\n\t\t# \tif _from_real_name[0][1]:\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(_from_real_name[0][1])\n\t\t# \telse:\n\t\t# \t\t# assume that the encoding is utf-8\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(\"utf-8\")\n\t\t# except UnicodeDecodeError,e:\n\t\t# \tprint e\n\t\t# \tpass\n\n\t\t#self.from_real_name = email.Header.decode_header(email.utils.parseaddr(_from_email)[0])[0][0]\n\t\tself.To = self.mail.get(\"To\")\n\t\tif self.To:\n\t\t\tto = u\"\"\n\t\t\tfor name, encoding in decode_header(self.To):\n\t\t\t\tif encoding:\n\t\t\t\t\tto += name.decode(encoding)\n\t\t\t\telse:\n\t\t\t\t\tto += name\n\t\t\tself.To = to.lower()\n\t\tself.CC = self.mail.get(\"CC\")\n\t\tif self.CC:\n\t\t\tself.CC = self.CC.lower()\n\t\tif self.mail[\"Date\"]:\n\t\t\ttry:\n\t\t\t\tutc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail[\"Date\"]))\n\t\t\t\tutc_dt = datetime.datetime.utcfromtimestamp(utc)\n\t\t\t\tself.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\texcept:\n\t\t\t\tself.date = now()\n\t\telse:\n\t\t\tself.date = now()\n\t\tif self.date > now():\n\t\t\tself.date = now()", "def parseheader(self):\n for line in self.rawheader.split(\"\\n\"):\n pat = \"QUITTING\"\n if 
pat in line:\n self.prefix = line\n continue\n\n pat = \"VERSION NUMBER\"\n if pat in line:\n self.softvers = line[28:].strip()\n continue\n\n pat = \"DATE/TIME IS\"\n if pat in line:\n meta = line[22:].strip()\n matchobj = dtpat.match(meta)\n if matchobj:\n try:\n self.dumpdt = datetime.strptime(meta, moddtfmt)\n except:\n self.nodump = True\n self.comment += (\n \" *** Cannot read module date/time: {}\\n\".format(meta)\n )\n continue\n\n pat = \"NUMBER RECORDS IS\"\n if pat in line:\n self.ndumprec = line[22:].strip()\n continue\n\n pat = \"MODULE TYPE IS\"\n if pat in line:\n self.modtype = line[22:].strip()\n continue\n\n pat = \"SERIAL NUMBER IS\"\n if pat in line:\n self.modserial = line[22:].strip()\n continue\n\n pat = \"COND S/N IS\"\n if pat in line:\n meta = line[22:].strip()\n serials = meta.split(\"/\")\n self.cellserial = serials[1]\n self.ioserial = serials[0]\n continue\n\n pat = \"SAMPLING INTERVAL IS\"\n if pat in line:\n meta = line[22:].strip()\n self.sampintv = meta\n if meta == \"00:01:00\":\n self.nodump = False\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n elif meta != \"00:02:00\":\n self.nodump = True\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n continue\n\n pat = \"AVERAGE INTERVAL IS\"\n if pat in line:\n self.avgintv = line[22:].strip()\n if int(self.avgintv) != 24:\n self.nodump = True\n self.comment += \" *** Average interval is {}\\n\".format(meta)\n continue\n\n pat = \"BATTERY VOLTAGE IS\"\n if pat in line:\n self.voltage = line[22:].strip()\n continue\n\n return self.modserial", "async def handle_message_header(self, peer_name, message_header, payload):", "def normalize(self):\n self.header.set_length(self.body)", "def convert_header(contents):\n replacement = re.sub(r\"WEBVTT\\n\", \"\", contents)\n replacement = re.sub(r\"Kind:[ \\-\\w]+\\n\", \"\", replacement)\n replacement = re.sub(r\"Language:[ \\-\\w]+\\n\", \"\", replacement)\n return replacement", "def _ProcessLine(\n self,\n first_line,\n input_line,\n line,\n stripped_line,\n output_stream):\n # Check for the start of a code block.\n if constants.START_CODEBLOCK_RE.match(stripped_line):\n if self._code_block_depth == 0:\n # Start a new collection of lines.\n self._code_block_lines = []\n else:\n # Just an embedded code block.\n self._code_block_lines.append(line)\n self._code_block_depth += 1\n return\n\n # Check for the end of a code block.\n if constants.END_CODEBLOCK_RE.match(stripped_line):\n self._code_block_depth -= 1\n if self._code_block_depth == 0:\n # Closed the highest-level code block, handle it.\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n \"\\n\")\n self._formatting_handler.HandleCodeBlockOpen(\n input_line,\n output_stream,\n None)\n code = \"\".join(self._code_block_lines)\n self._formatting_handler.HandleText(input_line, output_stream, code)\n self._formatting_handler.HandleCodeBlockClose(input_line, output_stream)\n else:\n # Just closed an embedded clode block.\n self._code_block_lines.append(line)\n return\n\n # Check if we're in a code block.\n # If we are, just put the raw text into code_block_lines.\n if self._code_block_depth != 0:\n self._code_block_lines.append(line)\n return\n\n # For empty lines, close all formatting.\n if not stripped_line:\n if not self._ConsumeTextForPlugin():\n self._SetCurrentList(input_line, 0, \" \", output_stream)\n self._CloseTags(input_line, output_stream)\n\n if self._table_columns:\n self._formatting_handler.HandleTableClose(input_line, output_stream)\n 
self._table_columns = []\n self._table_column = 0\n\n self._formatting_handler.HandleParagraphBreak(input_line, output_stream)\n return\n\n # Non-empty line, finish the previous line's newline.\n if not first_line:\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n \"\\n\")\n\n # Now check if we're processing within a list.\n indent_pos = constants.INDENT_RE.match(line).end()\n if (indent_pos and indent_pos < len(line) and\n not self._ConsumeTextForPlugin()):\n list_type = constants.LIST_TYPES.get(line[indent_pos], \"blockquote\")\n\n if self._SetCurrentList(input_line, indent_pos, list_type, output_stream):\n # Blockquotes take the entire remainder of the line,\n # but everything else skips the list symbol plus the space after.\n # (In case there is no space after, the first character is skipped;\n # we will warn if this is detected, as it was probably unintended.)\n if list_type == \"blockquote\":\n line = line[indent_pos:]\n else:\n if line[indent_pos + 1] != \" \":\n self._warning_method(\n input_line,\n u\"Missing space after list symbol: {0}, \"\n \"'{1}' was removed instead.\"\n .format(line[indent_pos], line[indent_pos + 1]))\n line = line[indent_pos + 2:]\n\n stripped_line = line.strip()\n else:\n # Reset to no indent.\n self._SetCurrentList(input_line, 0, \" \", output_stream)\n\n # Finally, split the line into formatting primitives.\n # We do so without whitespace so we can catch line breaks across tags.\n if constants.LINE_FORMAT_RE.match(stripped_line):\n self._ProcessMatch(\n input_line,\n constants.LINE_FORMAT_RE,\n stripped_line,\n output_stream)\n else:\n self._ProcessMatch(\n input_line,\n constants.TEXT_FORMAT_RE,\n stripped_line,\n output_stream)\n\n self._CloseTableRow(input_line, output_stream)", "def getmailheader(header_text, default=\"ascii\"):\n try:\n headers = decode_header(header_text)\n except email.Errors.HeaderParseError:\n # This already append in email.base64mime.decode()\n # instead return a sanitized ascii string\n return header_text.encode('ascii', 'replace').decode('ascii')\n else:\n for i, (text, charset) in enumerate(headers):\n try:\n headers[i] = unicode(text, charset or default, errors='replace')\n except LookupError:\n # if the charset is unknown, force default\n headers[i] = unicode(text, default, errors='replace')\n return u\"\".join(headers)", "def fix_header(po):\r\n\r\n # By default, django-admin.py makemessages creates this header:\r\n #\r\n # SOME DESCRIPTIVE TITLE.\r\n # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER\r\n # This file is distributed under the same license as the PACKAGE package.\r\n # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\r\n\r\n po.metadata_is_fuzzy = [] # remove [u'fuzzy']\r\n header = po.header\r\n fixes = (\r\n ('SOME DESCRIPTIVE TITLE', EDX_MARKER),\r\n ('Translations template for PROJECT.', EDX_MARKER),\r\n ('YEAR', str(datetime.utcnow().year)),\r\n ('ORGANIZATION', 'edX'),\r\n (\"THE PACKAGE'S COPYRIGHT HOLDER\", \"EdX\"),\r\n (\r\n 'This file is distributed under the same license as the PROJECT project.',\r\n 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'\r\n ),\r\n (\r\n 'This file is distributed under the same license as the PACKAGE package.',\r\n 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'\r\n ),\r\n ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),\r\n )\r\n for src, dest in fixes:\r\n header = header.replace(src, dest)\r\n po.header = header", "def handle_message_header(self, message_header, payload):\n pass", "def 
header(self, field, value):\n if field.lower() == 'from':\n logger.debug(f\"({self.id}) \\\"From:\\\" raw: '{value}'\")\n value = normalizeRawFromHeader(value)\n logger.info(f\"({self.id}) \\\"From:\\\" cleaned: '{value}'\")\n if value == '':\n logger.warning(f\"\\\"From:\\\" header empty! WTF, but nothing to do. OK for now.\")\n self.set_suspicious_headers(False, \"EMPTY FROM HEADER - WTF\")\n else:\n decoded_from = get_decoded_header(value)\n logger.debug(f\"({self.id}) \\\"From:\\\" decoded raw: '{value}'\")\n decoded_from = normalizeRawFromHeader(decoded_from)\n logger.info(f\"({self.id}) \\\"From:\\\" decoded cleaned: '{decoded_from}'\")\n all_domains = address_domain_regex.findall(decoded_from)\n all_domains = [a.lower() for a in all_domains]\n if len(all_domains) == 0:\n logger.warning(f\"({self.id}) No domain in decoded \\\"From:\\\" - WTF! OK, though\")\n self.set_suspicious_headers(False, \"No domains in decoded FROM\")\n elif len(all_domains) == 1:\n logger.debug(f\"({self.id}) Only one domain in decoded \\\"From:\\\": '{all_domains[0]}' - OK\")\n self.set_suspicious_headers(False, \"Only one domain in decoded FROM\")\n else:\n logger.info(f\"({self.id}) Raw decoded from header contains multiple domains: '{all_domains}' - Checking\")\n if len(set(all_domains)) > 1:\n logger.info(f\"({self.id}) Multiple different domains in decoded \\\"From:\\\". - NOT OK\")\n self.set_suspicious_headers(True, \"Multiple domains in decoded FROM are different\")\n else:\n logger.info(f\"({self.id}) All domains in decoded \\\"From:\\\" are identical - OK\")\n self.set_suspicious_headers(False, \"Multiple domains in decoded FROM match properly\")\n # CONTINUE so we reach eom hook.\n # TODO: Log and react if multiple From-headers are found?\n return Milter.CONTINUE", "def fix_header(file_path):\n logging.warning(\"Couldn't open edf {}. 
Trying to fix the header ...\".format(file_path))\n f = open(file_path, 'rb')\n content = f.read()\n f.close()\n \n header = content[:256]\n # print(header)\n\n # version = header[:8].decode('ascii')\n # patient_id = header[8:88].decode('ascii')\n # [age] = re.findall(\"Age:(\\d+)\", patient_id)\n # [sex] = re.findall(\"\\s\\w\\s\", patient_id)\n\n recording_id = header[88:168].decode('ascii')\n # startdate = header[168:176]\n # starttime = header[176:184]\n # n_bytes_in_header = header[184:192].decode('ascii')\n # reserved = header[192:236].decode('ascii')\n # THIS IS MESSED UP IN THE HEADER DESCRIPTION\n # duration = header[236:244].decode('ascii')\n # n_data_records = header[244:252].decode('ascii')\n # n_signals = header[252:].decode('ascii')\n \n date = recording_id[10:21]\n day, month, year = date.split('-')\n if month == 'JAN':\n month = '01'\n\n elif month == 'FEB':\n month = '02'\n\n elif month == 'MAR':\n month = '03'\n\n elif month == 'APR':\n month = '04'\n\n elif month == 'MAY':\n month = '05'\n\n elif month == 'JUN':\n month = '06'\n\n elif month == 'JUL':\n month = '07'\n\n elif month == 'AUG':\n month = '08'\n\n elif month == 'SEP':\n month = '09'\n\n elif month == 'OCT':\n month = '10'\n\n elif month == 'NOV':\n month = '11'\n\n elif month == 'DEC':\n month = '12'\n\n year = year[-2:]\n date = '.'.join([day, month, year])\n \n fake_time = '00.00.00'\n \n # n_bytes = int(n_bytes_in_header) - 256\n # n_signals = int(n_bytes / 256)\n # n_signals = str(n_signals) + ' '\n # n_signals = n_signals[:4]\n \n # new_header = version + patient_id + recording_id + date + fake_time + n_bytes_in_header + reserved +\n # new_header += n_data_records + duration + n_signals\n # new_content = (bytes(new_header, encoding=\"ascii\") + content[256:])\n\n new_content = header[:168] + bytes(date + fake_time, encoding=\"ascii\") + header[184:] + content[256:]\n\n # f = open(file_path, 'wb')\n # f.write(new_content)\n # f.close()", "def __compose(self, msg):\n header = b'\\xFB\\xBF'\n end = b'\\xED'\n # Length is sum of header(2), length, check + msg bytes\n length = bytes([4 + len(msg)])\n # Check is sum of length + msg (length+(cmd+params)), with modulus\n # to fit into a single byte\n check_list = bytearray(length)\n check_list.extend(msg)\n check = bytes([sum(check_list) % 256])\n return header + length + msg + check + end", "def _process_incoming_mail(raw_message, recipients):\n recipients = [x[1] for x in email.utils.getaddresses([recipients])]\n\n incoming_msg = mail.InboundEmailMessage(raw_message)\n\n if 'X-Google-Appengine-App-Id' in incoming_msg.original:\n raise InvalidIncomingEmailError('Mail sent by App Engine')\n\n # Use the subject to find the issue number.\n # Originally the tag was (issueNNN).\n # Then we changed it to be (issue NNN by WHO).\n # We want to match either of these, and we need to deal with\n # the fact that some mail readers will fold the long subject,\n # turning a single space into \"\\r\\n \".\n # We use \"issue\\s*\" to handle all these forms,\n # and we omit the closing ) to accept both the original and the \"by WHO\" form.\n subject = incoming_msg.subject or ''\n match = re.search(r'\\(issue\\s*(?P<id>\\d+)', subject)\n if match is None:\n raise InvalidIncomingEmailError('No issue id found: %s', subject)\n issue_id = int(match.groupdict()['id'])\n issue = models.Issue.get_by_id(issue_id)\n if issue is None:\n raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)\n sender = email.utils.parseaddr(incoming_msg.sender)[1]\n\n body = None\n for _, 
payload in incoming_msg.bodies('text/plain'):\n # FIXME(andi): Remove this when issue 2383 is fixed.\n # 8bit encoding results in UnknownEncodingError, see\n # http://code.google.com/p/googleappengine/issues/detail?id=2383\n # As a workaround we try to decode the payload ourselves.\n if payload.encoding == '8bit' and payload.charset:\n body = payload.payload.decode(payload.charset)\n # If neither encoding not charset is set, but payload contains\n # non-ASCII chars we can't use payload.decode() because it returns\n # payload.payload unmodified. The later type cast to db.Text fails\n # with a UnicodeDecodeError then.\n elif payload.encoding is None and payload.charset is None:\n # assume utf-8 but set replace flag to go for sure.\n body = payload.payload.decode('utf-8', 'replace')\n else:\n body = payload.decode()\n break\n if body is None or not body.strip():\n raise InvalidIncomingEmailError('Ignoring empty message.')\n elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:\n # see issue325, truncate huge bodies\n trunc_msg = '... (message truncated)'\n end = django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)\n body = body[:end]\n body += trunc_msg\n\n # If the subject is long, this might come wrapped into more than one line.\n subject = ' '.join([x.strip() for x in subject.splitlines()])\n msg = models.Message(issue_key=issue.key, parent=issue.key,\n subject=subject,\n sender=sender,\n recipients=[x for x in recipients],\n date=datetime.datetime.now(),\n text=body,\n draft=False)\n\n # Add sender to reviewers if needed.\n all_emails = [str(x).lower()\n for x in ([issue.owner.email()] +\n issue.reviewers +\n issue.cc +\n issue.collaborator_emails())]\n if sender.lower() not in all_emails:\n query = models.Account.query(models.Account.lower_email == sender.lower())\n account = query.get()\n if account is not None:\n issue.reviewers.append(account.email) # e.g. 
account.email is CamelCase\n else:\n issue.reviewers.append(db.Email(sender))\n\n issue.calculate_updates_for(msg)\n issue.put()\n msg.put()", "def preprocess_raw(self):\n pass", "def reconstruct_pragma_multilines(header):\n\n # ...\n def _is_pragma(x):\n if not(isinstance(x, CommentNode) and x.value.startswith('#$')):\n return False\n env = x.value[2:].lstrip()\n if (env.startswith('header') or\n env.startswith('omp') or\n env.startswith('acc')):\n return False\n return True\n\n _ignore_stmt = lambda x: isinstance(x, (EndlNode, CommentNode)) and not _is_pragma(x)\n def _is_multiline(x):\n # we use tr/except to avoid treating nodes without .value\n try:\n return x.value.rstrip().endswith('&')\n except:\n return False\n\n condition = lambda x: (_is_multiline(x.parent) and (_is_pragma(x) or _ignore_stmt(x)))\n # ...\n\n if not _is_multiline(header):\n return header.value\n\n ls = []\n node = header.next\n while condition(node):\n # append the pragma stmt\n if _is_pragma(node):\n ls.append(node.value)\n\n # look if there are comments or empty lines\n node = node.next\n if _ignore_stmt(node):\n node = node.next\n\n txt = ' '.join(i for i in ls)\n txt = txt.replace('#$', '')\n txt = txt.replace('&', '')\n txt = '{} {}'.format(header.value.replace('&', ''), txt)\n return txt", "def _preamble(self):\n # A Python 2.7 legacy: remove \n # assert False == self.push(\"from __future__ import division\")\n \n # If something was left over, this discards it.\n self.resetbuffer()", "def normalizeRawFromHeader(value):\n return value.replace('\\n', '').replace('\\r', '').strip()", "def keep_header_subject(text, keep_subject=False):\n _before, _blankline, after = text.partition('\\n\\n')\n\n sub = [l for l in _before.split(\"\\n\") if \"Subject:\" in l]\n if keep_subject:\n final = sub[0] + \"\\n\" + after\n else:\n final = after\n return final", "def bert_preprocess(raw_text):\n nlp = English()\n nlp.add_pipe(nlp.create_pipe('sentencizer')) # updated\n doc = nlp(raw_text)\n sentences = [sent.string.strip() for sent in doc.sents][0:2] \n new_sentences = []\n for i, sentence in enumerate(sentences):\n if i==0:\n new_sentences.append(\"[CLS] \" + sentence + \" [SEP]\")\n else:\n new_sentences.append(sentence + \" [SEP]\")\n \n preprocessed_text = ' '.join(new_sentences)\n \n if \"[CLS]\" not in preprocessed_text:\n raise Exception(\"[CLS] not found in preprocessed text\")\n if \"[SEP]\" not in preprocessed_text:\n raise Exception(\"[SEP] not found in preprocessed text\")\n \n return", "def consolidate_messages(self, msg):", "def pre_process(in_path):\n in_string = open(in_path, 'r').read()\n multi_line = '/\\\\*[^*]*\\\\*+(?:[^/*][^*]*\\\\*+)*/'\n\n # header\n description = re.search(multi_line, in_string).group(0)\n unit = re.search('\\\\n\\\\s*// unit .*', in_string).group(0)\n imports = re.findall('\\\\n\\\\s*// import .*', in_string)\n import_string = ''\n for i in imports:\n import_string += resolve_import(i.strip()[10:], in_path.parent)\n\n use_string = ''\n uses = re.findall('\\\\n\\\\s*// uses .*', in_string)\n for u in uses:\n use_string += 'uses ' + u.strip()[8:] + ';\\n'\n if use_string != '':\n use_string = '\\n\\n' + use_string\n\n header = '{' + description[2:-2] + '}\\n\\nunit ' + unit.strip()[8:] + ';' + use_string + '\\n\\n'\n\n # main part\n in_string_list, delphi_string_list = split(import_string + '\\n\\n' + in_string)\n\n return header, in_string_list, delphi_string_list", "def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n 
lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def preparse(self, raw):\n return raw", "def update_pre_block(pre_block):\r\n updated_block=\"\";\r\n count=0;\r\n for line in pre_block.splitlines():\r\n count+=1;\r\n if count<len(pre_block.splitlines()):\r\n line=update_line(line)+\"\\n\";\r\n updated_block=updated_block+line;\r\n else:\r\n line=update_line(line);\r\n updated_block=updated_block+line;\r\n return updated_block", "def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}", "def __call__(self,message):\n if self.header != self.prevHeader:\n if self.prevHeader:\n self.writeFooter()\n if self.header:\n self.writeHeader(self.header)\n self.prevHeader = self.header\n self.writeMessage(message)", "def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. 
Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new", "def pre_process(self, raw_text):\n # remove the space or other symbols\n word_lists = re.split(r'\\s+', raw_text.strip())\n if len(word_lists) < 2:\n print(word_lists)\n # exit(1)\n sent_index = word_lists[0]\n word_lists = ''.join(word_lists[1:])\n # word_lists = re.split(r'。', word_lists)\n # sent_content = ''.join(word_lists)\n return sent_index, word_lists", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! 
or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def preprocess_raw_text(self, lower_case = True, punctuation = \"delete\"):\n\n\n\t\tprocessed_text = []\n\t\tfor line in self.raw:\n\t\t\tprocessed_line = self.process_line(line, lower_case, punctuation)\n\t\t\tif len(processed_line):\n\t\t\t\tprocessed_text.append(processed_line)\n\n\t\tself.processed_text = processed_text", "def header(self, header, default=None):\n result = []\n header_value = self.email.get(header, default)\n if header_value:\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n\n return header_value", "def transform_line(line):\n for proc, pattern in INCLUDE_LINES:\n if pattern in line:\n # Log lines look like this:\n # pylint: disable=line-too-long\n # Mar 12 09:58:43 jumphost-win-02 metadata-server[5259]: I0312 09:58:43.756257 5259 server.go:87] Updated token: [redacted]\n timestamp = line.split('jumphost', 1)[0]\n suffix = line.split(pattern, 1)[1].rstrip()\n return timestamp + proc + ': ' + pattern + suffix\n return None", "def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. 
Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def pre_body(self):\n return file_ops.read_utf8(self.pre_file_name)", "def transform_fasta_header(fastaheader):\n\n fastq_source, read_header = fastaheader.split(\" \", 1)[0].rsplit(\"_\", 1)\n fastq_base = fastq_source.rsplit(\"_\", 1)[0]\n return fastq_base, read_header", "def pre_process_code_block(block):\n if 'indent' in block and block['indent']:\n indent = r'^' + block['indent']\n block['content'] = re.sub(indent, '', block['icontent'],\n flags=re.MULTILINE)", "def process_message(self, msg, src):", "def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"", "def decode_header(header):\n new_header = {}\n\n for item in header:\n split = item.split('\\t')\n new_header[split[0].replace(':', '')] = split[1].replace(\"\\r\\n\", \"\")\n\n return new_header", "def addSBHeaders(self, prob, clues):\n if prob < options['Categorization','ham_cutoff']:\n disposition = options['Headers','header_ham_string']\n elif prob > options['Categorization','spam_cutoff']:\n disposition = options['Headers','header_spam_string']\n else:\n disposition = options['Headers','header_unsure_string']\n self.RememberClassification(disposition)\n self[options['Headers','classification_header_name']] = disposition\n if options['Headers','include_score']:\n disp = \"%.*f\" % (options[\"Headers\", \"header_score_digits\"], prob)\n if options[\"Headers\", \"header_score_logarithm\"]:\n if prob<=0.005 and prob>0.0:\n x=-math.log10(prob)\n disp += \" (%d)\"%x\n if prob>=0.995 and prob<1.0:\n x=-math.log10(1.0-prob)\n disp += \" (%d)\"%x\n self[options['Headers','score_header_name']] = disp\n if options['Headers','include_thermostat']:\n thermostat = '**********'\n self[options['Headers','thermostat_header_name']] = \\\n thermostat[:int(prob*10)]\n if options['Headers','include_evidence']:\n hco = options['Headers','clue_mailheader_cutoff']\n sco = 1 - hco\n evd = []\n for word, score in clues:\n if (word[0] == '*' or score <= hco or score >= sco):\n if isinstance(word, types.UnicodeType):\n word = email.Header.Header(word,\n charset='utf-8').encode()\n evd.append(\"%r: %.2f\" % (word, score))\n wrappedEvd = []\n headerName = options['Headers','evidence_header_name']\n lineLength = len(headerName) + len(': ')\n for component, index in zip(evd, range(len(evd))):\n wrappedEvd.append(component)\n lineLength += len(component)\n if index < len(evd)-1:\n if lineLength + len('; ') + len(evd[index+1]) < 78:\n wrappedEvd.append('; ')\n else:\n 
wrappedEvd.append(';\\n\\t')\n lineLength = 8\n self[headerName] = \"\".join(wrappedEvd)\n if isinstance(options[\"Headers\", \"notate_to\"], types.StringTypes):\n notate_to = (options[\"Headers\", \"notate_to\"],)\n else:\n notate_to = options[\"Headers\", \"notate_to\"]\n if disposition in notate_to:\n try:\n self.replace_header(\"To\", \"%s,%s\" % (disposition,\n self[\"To\"]))\n except KeyError:\n self[\"To\"] = disposition\n if isinstance(options[\"Headers\", \"notate_subject\"], types.StringTypes):\n notate_subject = (options[\"Headers\", \"notate_subject\"],)\n else:\n notate_subject = options[\"Headers\", \"notate_subject\"]\n if disposition in notate_subject:\n try:\n self.replace_header(\"Subject\", \"%s,%s\" % (disposition,\n self[\"Subject\"]))\n except KeyError:\n self[\"Subject\"] = disposition\n if options['Headers','add_unique_id']:\n self[options['Headers','mailid_header_name']] = self.id", "def get_email_details(header: str) -> dict:\r\n try:\r\n m = re.match(\r\n r\"\"\"\r\n ([\\w\\W]* # remove lines \r\n (\r\n ^Date: \\s*(?P<date>[\\w\\W]{25}) # obtain date (\"date\")\r\n |^From: \\s*(?P<from>[\\w\\W]*?$) # obtain sender (\"from\")\r\n |^To: \\s*(?P<to>[\\w\\W]*?$) # obtain receiver (\"to\")\r\n |^Subject: \\s*(?P<subject>[\\w\\W]*?$) # obtain subject (\"subject\")\r\n )){4}\r\n \"\"\",\r\n header,\r\n re.VERBOSE | re.MULTILINE,\r\n )\r\n\r\n return m.groupdict()\r\n\r\n except:\r\n return None", "def parse_bro_smtp(smtp_path, target_dir, prefix='smtp'):\n\n # the current message we're parsing in the case of multiple emails coming in over the same connection\n smtp_message_index = 0 \n\n with open(smtp_path, 'r', errors='ignore') as fp:\n source_ipv4 = None\n source_port = None\n envelope_from = None\n envelope_to = []\n\n # state flag for when the data isn't quite right (see below)\n _bypass_read = False\n\n # the first line of the file has the source IP address of the smtp connection\n # in the following format: 172.16.139.143:38668/tcp\n\n line = fp.readline()\n m = REGEX_BRO_SMTP_SOURCE_IPV4.match(line)\n\n if not m:\n logging.error(f\"unable to parse soure address from {smtp_path} ({line.strip()})\")\n event_time = saq.LOCAL_TIMEZONE.localize(datetime.datetime.fromtimestamp(os.path.getmtime(smtp_path)))\n # in this case we skip the first readline() call since we've already read it\n _bypass_read = True\n else:\n source_ipv4 = m.group(1)\n source_port = m.group(2)\n\n logging.debug(f\"got source ipv4 {source_ipv4} port {source_port} for {smtp_path}\")\n\n # the second line is the time (in epoch UTC) that bro received the file\n line = fp.readline()\n event_time = datetime.datetime.utcfromtimestamp(int(line.strip()))\n logging.debug(f\"got event time {event_time} for {smtp_path}\")\n\n STATE_SMTP = 1\n STATE_DATA = 2\n\n state = STATE_SMTP\n rfc822_path = None\n rfc822_fp = None\n\n def _reset_state():\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp = None\n #source_ipv4 = None\n #source_port = None\n envelope_from = None\n envelope_to = []\n state = STATE_SMTP\n\n def _finalize():\n # called when we detect the end of an SMTP stream OR the end of the file (data)\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp.close()\n logging.info(\"finished parsing {} from {}\".format(rfc822_path, smtp_path))\n result = RFC822Email(\n source_ipv4=source_ipv4,\n source_port=source_port,\n envelope_from=envelope_from,\n envelope_to=envelope_to,\n received=event_time,\n file_path=rfc822_path)\n 
_reset_state()\n return result\n\n # smtp is pretty much line oriented\n while True:\n\n # if we read the first line and it wasn't what we expected\n # then we skip reading it here since we already have it\n if _bypass_read:\n _bypass_read = False\n else:\n line = fp.readline()\n\n if line == '':\n break\n\n if state == STATE_SMTP:\n m = REGEX_BRO_SMTP_MAIL_FROM.match(line)\n if m:\n envelope_from = m.group(1)\n logging.debug(\"got envelope_from {} for {}\".format(envelope_from, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RCPT_TO.match(line)\n if m:\n envelope_to.append(m.group(1))\n logging.debug(\"got envelope_to {} for {}\".format(envelope_to, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_DATA.match(line)\n if m or (not line.startswith('<') and not line.startswith('>')):\n state = STATE_DATA\n rfc822_path = os.path.join(target_dir, f'{prefix}.{smtp_message_index}.email.rfc822')\n smtp_message_index += 1\n rfc822_fp = open(rfc822_path, 'w')\n logging.debug(\"created {} for {}\".format(rfc822_path, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RSET.match(line)\n if m:\n logging.debug(f\"detected RSET for {smtp_path}\")\n _reset_state()\n continue\n\n # any other command we skip\n logging.debug(f\"skipping SMTP command {line.strip()}\")\n continue\n\n # otherwise we're reading DATA and looking for the end of that\n if line.strip() == ('> . .'):\n yield _finalize()\n continue\n\n rfc822_fp.write(line)\n continue\n\n # did the file end while we were reading SMTP data?\n if state == STATE_DATA:\n yield _finalize()", "def rfc822_parse(infile):\n\n result = {}\n current_key = None\n current_content = []\n\n for idx, line in enumerate(infile):\n line = line.rstrip()\n if not line.strip():\n if result:\n yield result\n result = {}\n current_key = None\n current_content = None\n continue\n\n if current_key:\n if line.startswith(' ') or line.startswith('\\t'):\n current_content.append(line.strip())\n continue\n else:\n result[current_key] = '\\n'.join(current_content).strip()\n\n try:\n current_key, content = line.split(':', 1)\n except ValueError:\n logging.warn('malformed rfc822 format on %s:%d\\n',\n getattr(infile, 'name', '?'), idx)\n raise\n\n current_content = [content]\n\n if current_key:\n result[current_key] = '\\n'.join(current_content)\n\n if result:\n yield result", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def return_text_without_headlines(text):\n\n text = text.replace('\\\\n', '\\n')\n text = text.replace('\\\\r', '\\r')\n text = re.sub('h1. (.*)\\r', '', text)\n text = re.sub('h2. (.*)\\r', '', text)\n text = re.sub('h2. (.*)', '', text)\n text = re.sub('h3. 
(.*)\\r', '', text)\n text = re.sub('h4. (.*)\\r', '', text)\n text = text.replace('*acceptance criteria:*', \"\")\n text = text.replace('*acceptance criteria*:', \"\")\n text = text.replace('*acceptance criteria*', \"\")\n text = text.replace('*story:*', \"\")\n text = text.replace('*story*:', \"\")\n text = text.replace('*story*', \"\")\n text = text.replace('*stories:*', \"\")\n text = text.replace('*questions:*', \"\")\n text = text.replace('*questions*:', \"\")\n text = text.replace('*questions*', \"\")\n text = text.replace('*implementation notes:*', \"\")\n text = text.replace('*implementation notes*:', \"\")\n text = text.replace('*implementation notes*', \"\")\n text = text.replace('*notes:*', \"\")\n text = text.replace('*notes*:', \"\")\n text = text.replace('*notes*', \"\")\n text = text.replace('*Acceptance Criteria:*', \"\")\n text = text.replace('*Acceptance Criteria*:', \"\")\n text = text.replace('*Acceptance Criteria*', \"\")\n text = text.replace('*Story:*', \"\")\n text = text.replace('*Story*:', \"\")\n text = text.replace('*Story*', \"\")\n text = text.replace('*Stories:*', \"\")\n text = text.replace('*Questions:*', \"\")\n text = text.replace('*Questions*:', \"\")\n text = text.replace('*Questions*', \"\")\n text = text.replace('*Implementation Notes:*', \"\")\n text = text.replace('*Implementation Notes*:', \"\")\n text = text.replace('*Implementation Notes*', \"\")\n text = text.replace('*Notes:*', \"\")\n text = text.replace('*Notes*:', \"\")\n text = text.replace('*Notes*', \"\")\n text = text.replace('*Acceptance criteria:*', \"\")\n text = text.replace('*Acceptance criteria*:', \"\")\n text = text.replace('*Acceptance criteria*', \"\")\n text = text.replace('*Implementation notes:*', \"\")\n text = text.replace('*Implementation notes*:', \"\")\n text = text.replace('*Implementation notes*', \"\")\n text = text.replace('*Acceptance Criteria:*', \"\")\n text = text.replace('*Acceptance Criteria*:', \"\")\n text = text.replace('*Acceptance Criteria*', \"\")\n text = text.replace('*Implementation Notes:*', \"\")\n text = text.replace('*Implementation Notes*:', \"\")\n text = text.replace('*Implementation Notes*', \"\")\n text = text.replace(':\\r\\n****', \" \")\n text = text.replace('\\r\\n****', \". \")\n text = text.replace(':\\n****', \" \")\n text = text.replace('\\n****', \". \")\n text = text.replace(':\\r\\n***', \" \")\n text = text.replace('\\r\\n***', \". \")\n text = text.replace(':\\n***', \" \")\n text = text.replace('\\n***', \". \")\n text = text.replace(':\\r\\n**', \" \")\n text = text.replace('\\r\\n**', \". \")\n text = text.replace(':\\n**', \" \")\n text = text.replace('\\n**', \". \")\n text = text.replace(':\\r\\n*', \" \")\n text = text.replace('\\r\\n*', \". \")\n text = text.replace(':\\n*', \" \")\n text = text.replace('\\n*', \". \")\n text = text.replace(':\\r\\n\\r\\n', \" \")\n text = text.replace('\\r\\n\\r\\n', \". \")\n text = text.replace(':\\r\\n', \" \")\n text = text.replace('\\r\\n', \". \")\n text = text.replace('.\\n', \". \")\n text = text.replace('\\n', \" \")\n text = text.replace('.\\r', \". 
\")\n text = text.replace('\\r', \" \")\n text = text.replace('\\\\n', '\\n')\n text = text.replace('\\\\t', '\\t')\n text = text.replace('\\\\r', '\\r')\n text = text.replace('\\n', \" \")\n text = text.replace('\\r', \" \")\n text = text.replace('\\t', \" \")\n text = ' '.join(text.split())\n return text", "def _build_header_dictionary(self):\n start = 0\n #print self.raw_data\n for a in range(20):\n redatapuller = re.compile(\"\\r\\n\\r\\n\\r\\n(?P<word>.*?)\\t.*?\\n\", re.DOTALL)\n m = redatapuller.search(self.raw_data[start:])\n if not(m):\n break\n self.header_dictionary[m.group(\"word\")] = start + m.end()\n if a==0:\n self.header_dictionary[\"main\"] = start + m.end()\n start += m.end()", "def _msg2header(self, msg_files, msg_dir_path, node_name,\n cfe_platform_dir_path, pkg_name): \n basic_data_types = [\"int8\", \"int16\", \"int32\", \"int64\", \"float32\",\n \"float64\", \"string\", \"time\", \"duration\", \"uint8\",\n \"uint16\", \"uint32\", \"uint64\"]\n convert_data_types = {\"int8\": \"char\", \"int16\": \"short\", \"int32\": \"int\",\n \"int64\": \"long\", \"float32\": \"float\",\n \"float64\": \"double\", \"string\": \"std::string\",\n \"time\": \"RosTime\", \"duration\": \"duration\",\n \"uint8\": \"unsigned char\",\n \"uint16\": \"unsigned short\",\n \"uint32\": \"unsigned int\",\n \"uint64\": \"unsigned long\"}\n for msg_file_name in msg_files:\n include_list = []\n include_list.append(\"#include <string>\")\n include_list.append(\"#include <vector>\")\n include_list.append(\"#include \\\"cfe.h\\\"\")\n data_list = []\n vector_list = []\n struct_list = []\n # Read message file\n rfp = open(msg_dir_path + \"/\" + msg_file_name, 'r')\n msg_lines = []\n const_list = []\n for row in rfp:\n # Read message fileRead message file\n row = row[:row.find(\"#\")]\n if len(row) == 0:\n continue\n if row.find(\"=\") != -1:\n const_list.append(row)\n continue\n line = re.sub(r'[\\s\\t]+', \" \", row)\n msg_lines.append(line)\n\n for row in msg_lines:\n # Split row\n splited_row = row.strip().split(\" \")\n array_flag = 0\n data_name = splited_row[1]\n # Case of basic data\n if splited_row[0] in basic_data_types:\n data_type = convert_data_types[splited_row[0]]\n if data_type.find(\"[]\") >= 0:\n array_flag = 1\n data_type = data_type.replace(\"[]\", \"\")\n if data_type == \"RosTime\":\n include_list.append(\n \"#include \\\"../std_msg/RosTime.h\\\"\")\n # Case of other structures\n else:\n struct_list.append(data_name)\n data_type = splited_row[0]\n if data_type.find(\"[]\") >= 0:\n array_flag = 1\n data_type = data_type.replace(\"[]\", \"\")\n\n data_pkg_name = \"\"\n if data_type in [\"Point\", \"Pose\", \"PoseStamped\",\n \"PoseWithCovariance\", \"Quaternion\",\n \"Twist\", \"TwistWithCovariance\", \"Vector3\",\n \"Wrench\", \"WrenchStamped\"]:\n include_list.append(\n \"#include \\\"../geometry_msgs/\" + data_type + \".h\\\"\")\n data_pkg_name = \"geometry_msgs\"\n elif data_type in [\"Odometry\"]:\n include_list.append(\n \"#include \\\"../nav_msgs/\" + data_type + \".h\\\"\")\n data_pkg_name = \"nav_msgs\"\n elif data_type in [\"JointState\", \"Image\"]:\n include_list.append(\n \"#include \\\"../sensor_msgs/\" + data_type + \".h\\\"\")\n data_pkg_name = \"sensor_msgs\"\n elif data_type in [\"Float64MultiArray\", \"Header\",\n \"RosTime\", \"MultiArrayDimension\",\n \"MultiArrayLayout\", \"String\", \"Empty\"]:\n include_list.append(\n \"#include \\\"../std_msgs/\" + data_type + \".h\\\"\")\n data_pkg_name = \"std_msgs\"\n else:\n include_list.append(\n \"#include \\\"../\" + 
node_name + \"/\" + data_type + \".h\\\"\")\n data_pkg_name = node_name\n data_type = data_pkg_name + \"::\" + data_type\n\n # Case of arrays\n if array_flag == 1:\n vector_list.append([data_type, data_name])\n data_list.append(\"std::vector<\" +\n data_type + \"> \" + data_name)\n data_list.append(data_type + \"* \" + data_name + \"Data\")\n data_list.append(\"uint32 \" + data_name + \"DataSize\")\n # Case of not an array\n else:\n data_list.append(data_type + \" \" + data_name)\n rfp.close()\n # Write definition to header file\n wfp = open(cfe_platform_dir_path + \"/\" +\n msg_file_name.replace(\".msg\", \".h\"), \"w\")\n define_name = \"_\" + msg_file_name.upper().replace(\".\", \"_\") + \"_\"\n wfp.write(\"#ifndef \" + define_name + \"\\n\")\n wfp.write(\"#define \" + define_name + \"\\n\")\n wfp.write(\"\\n\")\n for include_line in include_list:\n wfp.write(include_line + \"\\n\")\n wfp.write(\"\\n\")\n wfp.write(\"namespace \" + pkg_name + \"\\n\")\n wfp.write(\"{\\n\")\n wfp.write(\" typedef struct\\n\")\n wfp.write(\" {\\n\")\n wfp.write(\" uint8 TlmHeader[CFE_SB_TLM_HDR_SIZE];\\n\")\n for data_line in data_list:\n wfp.write(\" \" + data_line + \";\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" void vector2pointer()\\n\")\n wfp.write(\" {\\n\")\n wfp.write(\"\\n\")\n for struct_line in struct_list:\n wfp.write(\" \" + struct_line +\n \".vector2pointer();\\n\")\n wfp.write(\"\\n\")\n for vector_line in vector_list:\n vector_type = vector_line[0]\n vector_name = vector_line[1]\n wfp.write(\" \" + vector_name + \"Data = (\" + vector_type +\n \"*)malloc(\" + vector_name + \".size() * sizeof(\" + vector_type + \"));\\n\")\n wfp.write(\" for(int ii = 0; ii < \" +\n vector_name + \".size(); ii++)\\n\")\n wfp.write(\" \" + vector_name +\n \"Data[ii] = \" + vector_name + \"[ii];\\n\")\n wfp.write(\" \" + vector_name +\n \"DataSize = \" + data_name + \".size();\\n\")\n wfp.write(\" std::vector<\" +\n vector_type + \">().swap(\" + vector_name + \");\\n\")\n wfp.write(\" }\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" void pointer2vector()\\n\")\n wfp.write(\" {\\n\")\n wfp.write(\"\\n\")\n for struct_line in struct_list:\n wfp.write(\" \" + struct_line +\n \".pointer2vector();\\n\")\n wfp.write(\"\\n\")\n for vector_line in vector_list:\n vector_type = vector_line[0]\n vector_name = vector_line[1]\n wfp.write(\" uint32 \" + vector_name +\n \"_size = \" + vector_name + \"DataSize;\\n\")\n wfp.write(\" std::vector<\" + vector_type +\n \">().swap(\" + vector_name + \");\\n\")\n wfp.write(\" for (size_t ii = 0; ii < \" +\n vector_name + \"_size; ii++) {\\n\")\n wfp.write(\" \" + vector_name +\n \".push_back(\" + vector_name + \"Data[ii]);\\n\")\n wfp.write(\" }\\n\")\n wfp.write(\" }\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" void deleteData()\\n\")\n wfp.write(\" {\\n\")\n for struct_line in struct_list:\n wfp.write(\" \" + struct_line + \".deleteData();\\n\")\n wfp.write(\"\\n\")\n for vector_line in vector_list:\n vector_name = vector_line[1]\n wfp.write(\" free(\" + vector_name + \");\\n\")\n wfp.write(\" }\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" void string2pointer()\\n\")\n wfp.write(\" {\\n\")\n for struct_line in struct_list:\n wfp.write(\" \" + struct_line +\n \".string2pointer();\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" }\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" void pointer2string()\\n\")\n wfp.write(\" {\\n\")\n for struct_line in struct_list:\n wfp.write(\" \" + struct_line +\n \".pointer2string();\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" }\\n\")\n wfp.write(\"\\n\")\n\n # Define 
constants\n for const_line in const_list:\n wfp.write(\" \" + const_line + \";\\n\")\n wfp.write(\"\\n\")\n wfp.write(\" } \" + msg_file_name.replace(\".msg\", \"\") + \";\\n\")\n\n wfp.write(\" typedef \" + msg_file_name.replace(\".msg\", \"\") +\n \"* const \" + msg_file_name.replace(\".msg\", \"\") + \"ConstPtr;\\n\")\n\n wfp.write(\"}\\n\")\n wfp.write(\"#endif // \" + define_name + \"\\n\")\n wfp.close()\n\n # Record of message file pass\n self._saveMsgFilePath(pkg_name, msg_dir_path + \"/\" + msg_file_name,\n msg_file_name.replace(\".msg\", \"\"))", "def parse_headers(fp, _class=http.client.HTTPMessage):\n headers = []\n while True:\n line = fp.readline(http.client._MAXLINE + 1)\n if len(line) > http.client._MAXLINE:\n raise http.client.LineTooLong(\"header line\")\n headers.append(line)\n if len(headers) > http.client._MAXHEADERS:\n raise HTTPException(f\"got more than {http.client._MAXHEADERS} headers\")\n if line in (b'\\r\\n', b'\\n', b''):\n break\n\n hstring = b''.join(headers)\n inferred = chardet.detect(hstring)\n if inferred and inferred['confidence'] > 0.8:\n # print(\"Parsing headers!\", hstring)\n hstring = hstring.decode(inferred['encoding'])\n else:\n hstring = hstring.decode('iso-8859-1')\n\n return email.parser.Parser(_class=_class).parsestr(hstring)", "def RewriteHeader(match):\n if match.group('type') == 'E':\n replacement = 'Error'\n else:\n replacement = 'Warning'\n # replace as 'Warning (W0511, funcName): Warning Text'\n return '%s (%s%s):' % (replacement, match.group('type'),\n match.group('remainder'))", "def _unserialize_header(self, data, persistent_start):\n name = \"\"\n sbuffer = data\n # Skip characters until a valid message id appears\n while len(sbuffer) >= self.header_size:\n header = sbuffer[:self.header_size]\n if repr(header) in self.messages:\n name = header\n break\n if not persistent_start:\n break\n sbuffer = sbuffer[1:]\n return name, len(data) - len(sbuffer)", "def report_preparation(data):\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n fd = open(f\"{report_file_path}/mail_report.html\", \"w\")\n fd.write(\n \"\"\"\n <html>\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html charset=UTF-8\" />\n <style>\n table {\n font-family: arial, sans-serif;\n border-collapse: collapse;\n width: 100%;\n }\n\n th {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n td {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n </style>\n </head>\n\n <body>\n <p><font color=\"black\"> Hi All </font></p>\n \"\"\"\n )\n fd.write(\n \"\"\"\n <p><font color=\"black\">{}\n </font></p>\n <table>\n <thead>\n <tr>\n <th> Job Category </th>\n <th> Highlighted information/Test Failure</th>\n <th> Job URL </th>\n <th> Bugzilla </th>\n <th> Job Status </th>\n </tr></thead> \"\"\".format(\n data[\"body\"]\n )\n )\n data.pop(\"body\")\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n\n if os.path.isfile(f\"{report_file_path}/subject\"):\n os.remove(f\"{report_file_path}/subject\")\n if os.path.isfile(f\"{report_file_path}/recipient\"):\n os.remove(f\"{report_file_path}/recipient\")\n with open(f\"{report_file_path}/subject\", \"wb\") as handler:\n pickle.dump(data[\"subject\"], handler)\n data.pop(\"subject\")\n\n with open(f\"{report_file_path}/recipient\", \"wb\") as handler:\n pickle.dump(data[\"recipient\"], handler)\n data.pop(\"recipient\")\n for _ in data:\n fd.write(\"<tr><td>{}</td>\".format(_, 
data[_]))\n fd.write(\"<td>\")\n for content in data[_][\"highlighted_information\"]:\n if (content.lstrip()).rstrip():\n if re.search(r\"tests.\", f\"{content}\"):\n fd.write(\n f'<font color=red><li align=\"left\">{(content.lstrip()).rstrip()}</li></font>'\n )\n else:\n fd.write(f'<li align=\"left\">{(content.lstrip()).rstrip()}</li>')\n fd.write(\"</td>\")\n fd.write(f\"<td><a href={data[_]['Build Url']}>Job Link</a></td>\")\n fd.write(\"<td>\")\n for bz in data[_][\"bugzilla\"].split(\".\"):\n if bz.lstrip().rstrip():\n fd.write(\n f\" <a href=https://bugzilla.xyz.com/show_bug.cgi?id={bz}>{bz}</a> \"\n )\n else:\n fd.write(f\"{bz}\")\n fd.write(\"</td>\")\n if data[_][\"Build_Status\"] == \"SUCCESS\":\n color = \"green\"\n fd.write(f\"<td><font color={color}>PASSED</font></td>\")\n else:\n color = \"red\"\n fd.write(f\"<td><font color={color}>FAILED</font></td>\")\n fd.write(\n \"\"\"\n </table>\n </body>\n <p><font color=\"black\">Note: For more details</font>\n <form action=\"https://wikipage></form></p>\n <p><font color=\"black\">Thanks</font><br>\n <font color=\"black\">xyz</font><p>\n </html>\"\"\"\n )\n fd.close()\n Common.logger.info(\"Report prepared for the selected job and their type\")", "def format_header(self, text: str, anchor: Optional[str] = None) -> str:", "def getheader(header_text, default=\"ascii\"):\n # Borrowed from: http://ginstrom.com/scribbles/2007/11/19/parsing-multilingual-email-with-python/\n\n headers = email.Header.decode_header(header_text)\n header_sections = [unicode(text, charset or default)\n for text, charset in headers]\n return u\" \".join(header_sections)", "def get_bp_headers(self) -> None:\n self.bp_headers = []\n for bp in self.body_parts_lst:\n c1, c2, c3 = (f\"{bp}_x\", f\"{bp}_y\", f\"{bp}_p\")\n self.bp_headers.extend((c1, c2, c3))", "def parse_header_block(lines):\n data = [line for line in lines[:MAX_HEADER_HEIGHT] if line.strip()]\n if not data or not INVITATION_RE.match(data[0]):\n return None\n out = {'number':None, 'type':None, 'date':None, 'time':None, 'place':None, 'datetime':None}\n for item in data:\n # typ a poradove cislo zastupitelstva\n m = TITLE_RE.match(item)\n if m:\n out['number'] = m.group(1).strip()\n out['type'] = m.group(2).strip()\n\n # den konani zastupitelstva\n m = TERM_DATE_RE.match(item)\n if m:\n try:\n out['date'] = date(int(m.group(3).strip()), int(m.group(2).strip()), int(m.group(1).strip()))\n except ValueError:\n pass\n\n # cas konani zastupitelstva\n m = TERM_TIME_RE.match(item)\n if m:\n try:\n out['time'] = time(int(m.group(1).strip()), int(m.group(2).strip()))\n except ValueError:\n pass\n\n # misto konani zastupitelstva\n m = PLACE_RE.match(item)\n if m:\n out['place'] = m.group(1).strip()\n\n # poskladani kompletniho datetime objektu\n out['datetime'] = out['date'] and out['time'] and \\\n datetime.combine(out['date'], out['time']) or None\n\n return out" ]
[ "0.66956985", "0.65403616", "0.620646", "0.6043575", "0.59951806", "0.5961931", "0.5931331", "0.5886571", "0.5792057", "0.5707722", "0.56881887", "0.5639475", "0.56263655", "0.561612", "0.5583248", "0.5579559", "0.55687916", "0.5543469", "0.5506505", "0.5480917", "0.54660165", "0.54598546", "0.5456983", "0.5449584", "0.5415977", "0.53913707", "0.5387937", "0.5374598", "0.53492486", "0.53050655", "0.52741414", "0.5273417", "0.5271429", "0.5270869", "0.5255983", "0.52556103", "0.52495044", "0.52466565", "0.5202866", "0.52022284", "0.52006274", "0.51986146", "0.518697", "0.5186709", "0.51747096", "0.51630193", "0.516277", "0.5162326", "0.5148186", "0.5146107", "0.51403046", "0.5136017", "0.51352865", "0.5115336", "0.5114312", "0.51116216", "0.51024216", "0.5096657", "0.5085037", "0.5077176", "0.50629896", "0.50616425", "0.5048715", "0.50261873", "0.50238", "0.50175565", "0.5010891", "0.5007435", "0.49930102", "0.49924484", "0.49802536", "0.49758604", "0.4964558", "0.49554622", "0.49521017", "0.49470195", "0.49449682", "0.49286038", "0.49264312", "0.49258626", "0.4925426", "0.49219906", "0.49173176", "0.49059463", "0.49051204", "0.49018902", "0.48966908", "0.4893976", "0.4893108", "0.48877898", "0.48873645", "0.4882242", "0.4878085", "0.48768646", "0.4871398", "0.48678532", "0.4866961", "0.48628724", "0.48625347", "0.4853702" ]
0.5239302
38
Used by JVM for java.lang.String and hence by some popular java memcached clients as default such as
def JAVA_NATIVE(key):
    h = 0
    l = len(key)
    for (idx,c) in enumerate(key):
        h += ord(c)*31**(l-(idx+1))
    return _signed_int32(h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_cache_key_adapter(obj):\n return obj", "def store_string(self, string: str) -> None:", "def stringable(self):\n return True", "def __init__(self) -> None:\n str.__init__(self)", "def test_str(self):\n dummy = DummyCryptographicObject()\n str(dummy)", "def intern(string): # real signature unknown; restored from __doc__\n return \"\"", "def simple_str(self):\n pass", "def __init__(self, string: str):\r\n self.string = string", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def string(self):\n return self._my_string", "def test_stringToString(self):\n self.assertNativeString(\"Hello!\", \"Hello!\")", "def test_string_default(self):\r\n default = 'BLAKE!'\r\n prop = String(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def __init__(self, string):\n self.string = string", "def __init__(self, string: str) -> None:\r\n self.string = string", "def is_string(value):\n return isinstance(value, (str, bytes))", "def __init__(self):\n self.string = None", "def _GetKeyString(self):", "def _GetKeyString(self):", "def as_default_string(string):\n return same_string_type_as(compat.default_string_type(), string)", "def safeToString():", "def get_string2(self):\n pass", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def setString(self, name: unicode, value: unicode) -> None:\n ...", "def get_string(self, **kwargs):\n ...", "def __init__ (self, string, weight=10):\n self.weight = weight\n str.__init__(self, string)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def test_as_native_str(self):\n class MyClass(object):\n @as_native_str()\n def __repr__(self):\n return u'abc'\n\n obj = MyClass()\n\n self.assertEqual(repr(obj), 'abc')\n if PY2:\n self.assertEqual(repr(obj), b'abc')\n else:\n self.assertEqual(repr(obj), u'abc')", "def test_string_update(self):\r\n vm = String.value_manager(None, None, 'str')\r\n assert not vm.changed\r\n vm.value = 'unicode'\r\n assert vm.changed", "def test_string_conversion():\n ob = ConversionTest()\n\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = \"eggs\"\n assert ob.StringField == \"eggs\"\n assert ob.StringField == u\"eggs\"\n\n ob.StringField = u\"spam\"\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = u'\\uffff\\uffff'\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = System.String(\"spam\")\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = System.String(u'\\uffff\\uffff')\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = None\n assert ob.StringField is None\n\n with pytest.raises(TypeError):\n ConversionTest().StringField = 1\n\n world = UnicodeString()\n test_unicode_str = u\"안녕\"\n assert test_unicode_str == str(world.value)\n assert test_unicode_str == str(world.GetString())\n assert test_unicode_str == str(world)", "def _GetKeyString(self):\n return self.__key_string", "def get_magic_quotes_runtime():\n raise NotImplementedError()", "def get_charset(self, default: str) -> str:\n ...", "def test_bytesToString(self):\n self.assertNativeString(b\"hello\", \"hello\")", "def is_string(value):\n 
return isinstance(value, basestring)", "def test_bytes_to_native_str(self):\n b = bytes(b'abc')\n s = bytes_to_native_str(b)\n if PY2:\n self.assertEqual(s, b)\n else:\n self.assertEqual(s, 'abc')\n self.assertTrue(isinstance(s, native_str))\n self.assertEqual(type(s), native_str)", "def __init__(self, str):\n pass", "def __getitem__(self, *args):\n return _libsbml.string___getitem__(self, *args)", "def native_string(input_var):\n if isinstance(input_var, str):\n return input_var\n\n return input_var.decode('utf-8', 'replace')", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def __call__(self, value: str):\n return self.basetype(value)", "def get(self, key, default=None):\n try:\n # get the value from the cache\n value = self._cache.get(self.prepare_key(key))\n if value is None:\n return default\n # pickle doesn't want a unicode!\n value = smart_str(value)\n # hydrate that pickle\n return pickle.loads(value)\n except Exception as err:\n return self.warn_or_error(err)", "def cstringio_buf(self):\r\n pass", "def __init__(self, str=None, hashfunc=None):\r\n self.str = str\r\n if hashfunc:\r\n self.hashfunc = hashfunc", "def set_strmem_type(self, *args):\n return _ida_hexrays.vdui_t_set_strmem_type(self, *args)", "def type(self, string):\n\n\t\tself._interface.type(string)", "def getString(self, name: unicode) -> unicode:\n ...", "def _map_disk_type(cls, str_, bytes_):\n if cls._disk_type is str:\n return str_\n elif cls._disk_type is bytes:\n return bytes_\n else:\n raise TypeError(f\"DiskType must be str or bytes, not {cls._disk_type}\")", "def get_as_string(self, use_cache_if_available=True):\n obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available)\n return obj_bytes.decode(\"utf-8\")", "def test_string_method(self):\n self.base.id = \"1234-5678-9012\"\n self.assertEqual(str(self.base), self.dict_obj)", "def testAddingItem(self):\n\n foo = \"bar\"\n memcache.add('foo', foo)\n assert memcache.get('foo') == foo\n\n tres_bien = u\"Très bien\".encode('utf-8')\n memcache.add('tres_bien', tres_bien)\n assert memcache.get('tres_bien') == tres_bien\n\n items = [u'foo', 'bar', tres_bien, {1: 'one'}, 42L]\n memcache.add('items', items)\n assert memcache.get('items') == items\n\n number = 10\n memcache.add('number', number)\n assert memcache.get('number') == number\n\n long_number = long(20)\n memcache.add('long', long_number)\n assert memcache.get('long') == long_number\n\n yes = True\n memcache.add('yes', yes)\n assert memcache.get('yes') == yes\n\n greeting = 'Hello'\n memcache.set('greeting', greeting, namespace='me')\n assert memcache.get('greeting') is None\n assert memcache.get('greeting', namespace='me') == greeting\n assert memcache.get('greeting', namespace='no') is None\n\n unicode_data = ['Äquator'.decode('utf-8'),]\n memcache.set('unicode', unicode_data)\n self.assertEqual(unicode_data, memcache.get('unicode'))\n assert type(memcache.get('unicode')) == list", "def test_key_str(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n string = str(key)\n assert isinstance(string, str)\n assert string == \"{'warning': False, 'in_car': True}\"", "def get_magic_quotes_gpc():\n raise NotImplementedError()", "def test_model_keyspace_attribute_must_be_a_string(self):", "def test_string_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, STRING_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, STRING_KEYS, \"foobar\", False)\n\t)", "def 
test_snmpcustomstring_get_kind(self):\n assert_equal(self.test_snmpcustomstring.get_kind(), 'mpsnmpcustomstring')", "def from_str(cls, string):", "def test_str(self):\n self.assertEqual(\n \"\\N{SNOWMAN}\",\n bytes_to_str(\"\\N{SNOWMAN}\"),\n )", "def test_to_String(self) -> None:\n assert to_String(1) == \"1\", to_String(1)\n assert to_String([1, 2, 3]) == str([1, 2, 3]), to_String([1, 2, 3])\n assert to_String(\"foo\") == \"foo\", to_String(\"foo\")\n assert to_String(None) == 'None'\n # test low level string converters too\n assert to_str(None) == 'None'\n assert to_bytes(None) == b'None'\n\n s1 = UserString('blah')\n assert to_String(s1) == s1, s1\n assert to_String(s1) == 'blah', s1\n\n class Derived(UserString):\n pass\n\n s2 = Derived('foo')\n assert to_String(s2) == s2, s2\n assert to_String(s2) == 'foo', s2", "def native_(s, encoding='latin-1', errors='strict'):\n if isinstance(s, text_type):\n return s\n return str(s, encoding, errors)", "def test_string():", "def __new__(cls, value):\r\n if isinstance(value, unicode):\r\n return unicode.__new__(cls, value)\r\n return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)", "def get(self, key):\n return \"\"", "def native_string(text):\n return cinder_utils.convert_str(text)", "def _force_string(x):\n if isinstance(x, basestring):\n return x\n else:\n return str(x)", "def test_unicode_string(self):\n result = attributeAsLDIF(\"another key\", \"another value\")\n self.assertEqual(result, b\"another key: another value\\n\")", "def test_value_max_string(self):\n raw = [\n 0x41,\n 0x41,\n 0x41,\n 0x41,\n 0x41,\n 0x42,\n 0x42,\n 0x42,\n 0x42,\n 0x42,\n 0x43,\n 0x43,\n 0x43,\n 0x43,\n ]\n string = \"AAAAABBBBBCCCC\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def __GetKeyString(self):\n return self._GetKeyString()", "def parse_message(self, message):\n # This should run in a separate thread\n message = pickle.loads(message)\n self.logger.log_bytes(message)\n\n if message[0] == \"set\":\n return self.cache.set(message[1], message[2])\n\n elif message[0] == \"del\":\n return self.cache.delete(message[1])\n\n elif message[0] == \"get\":\n return self.cache.get(message[1])\n\n elif message[0] == \"add\":\n return self.cache.add(message[1], message[2])\n\n else:\n print(\"Only these keywords are supported: get, set, delete\")\n\n return message", "def string_vector(self):\n pass", "def test_value_empty_string(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n string = \"\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def __setitem__(self, *args):\n return _libsbml.string___setitem__(self, *args)", "def is_string(obj):\n return isinstance(obj, basestring)", "def smart_bytes(s):\n # Handle the common case first for performance reasons.\n if isinstance(s, bytes) or isinstance(s, _PROTECTED_TYPES):\n return s\n if isinstance(s, memoryview):\n return bytes(s)\n return s.encode()", "def give_me_bytes(string):\n return string.encode('utf8') if isinstance(string, str) else string", "def string_id(self):\n id = self.id()\n if not isinstance(id, basestring):\n id = None\n return id", "def get_string(self):\n return self.__str", "def type_str_of(x):\n try:\n # what other way? 
this is only way I know of, to detect XML-RPC server.\n if x.hasattr(x,\"_ServerProxy__host\"):\n return \"XML-RPC\"\n \n return { type(\"string\"): \"STR\",\n type(42): \"INT\",\n type(42.0): \"FLOAT\",\n type([]): \"LIST\",\n type({}): \"DICT\",\n type(Ref(\"\")): \"REF\",\n }[ type(x) ]\n except:\n return \"Not a string, int, float, list, or dict.\"", "def _to_native_string(string, encoding='ascii'):\n if isinstance(string, str):\n out = string\n else:\n out = string.decode(encoding)\n\n return out", "def test_str_method(self):\n _name = 'test-name'\n el = MarkerId(_name)\n self.assertEqual(el.__str__(), _name)", "def test_get_service_string(self):\n pass", "def __str__(self):\n return str(self.GetString())", "def test_byte_string(self):\n result = attributeAsLDIF(b\"some key\", b\"some value\")\n self.assertEqual(result, b\"some key: some value\\n\")", "def test_str(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=\"3.3.3.3\", ap_info=self.ap)\n self.assertEqual(str(client), \"Client cc:cc:cc:cc:cc:cc - 3.3.3.3\")\n self.assertEqual(str(client), client.name)", "def test_str(self):\n obj = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n expected = str(binascii.hexlify(self.bytes_a))\n observed = str(obj)\n self.assertEqual(expected, observed)", "def as_str_any(value):\n if isinstance(value, bytes):\n return as_str(value)\n else:\n return str(value)", "def test_string_in_serializer() -> None:\n assert cv.custom_serializer(cv.string) == {\n \"type\": \"string\",\n }", "def __init__(self, input_str):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def getvalue(self):\n try:\n buffer_value = self.buffer.getvalue().decode()\n except UnicodeDecodeError:\n return self.buffer.getvalue()\n return super().getvalue() + buffer_value", "def decode_string(self, value):\r\n return value", "def test_unicodeToString(self):\n self.assertNativeString(u\"Good day\", \"Good day\")", "def test_strings(self):\n # Message. 
Double-nested to ensure serializers are recursing properly.\n message = {\n \"values\": {\n # UTF-8 sequence for british pound, but we want it not interpreted into that.\n \"utf-bytes\": b\"\\xc2\\xa3\",\n # Actual unicode for british pound, should come back as 1 char\n \"unicode\": \"\\u00a3\",\n # Emoji, in case someone is using 3-byte-wide unicode storage\n \"emoji\": \"\\u1F612\",\n # Random control characters and null\n \"control\": b\"\\x01\\x00\\x03\\x21\",\n }\n }\n # Send it and receive it\n channel_layer.send(\"str_test\", message)\n _, received = channel_layer.receive_many([\"str_test\"])\n # Compare\n self.assertIsInstance(received[\"values\"][\"utf-bytes\"], six.binary_type)\n self.assertIsInstance(received[\"values\"][\"unicode\"], six.text_type)\n self.assertIsInstance(received[\"values\"][\"emoji\"], six.text_type)\n self.assertIsInstance(received[\"values\"][\"control\"], six.binary_type)\n self.assertEqual(received[\"values\"][\"utf-bytes\"], message[\"values\"][\"utf-bytes\"])\n self.assertEqual(received[\"values\"][\"unicode\"], message[\"values\"][\"unicode\"])\n self.assertEqual(received[\"values\"][\"emoji\"], message[\"values\"][\"emoji\"])\n self.assertEqual(received[\"values\"][\"control\"], message[\"values\"][\"control\"])", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def smart_str(s):\n # Handle the common case first for performance reasons.\n if issubclass(type(s), str) or isinstance(s, _PROTECTED_TYPES):\n return s\n if isinstance(s, bytes):\n return str(s, 'utf-8')\n return str(s)", "def __getitem__(self, key):\n if type(key) is str:\n return self.encode(key)\n elif type(key) is list or type(key) is tuple:\n return self.decode(key)", "def test_noUnicode(self):\n s = proto_helpers.StringTransport()\n self.assertRaises(TypeError, s.write, \"foo\")", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def str_to_python(self, value):\r\n return unicode_safe(value)", "def process_string(string: str) -> str:\n\n return string if string else Presenter.DEFAULT", "def sortof_type_str_of(x):\n if hasattr(x,\"_ServerProxy__host\"):\n return \"XML-RPC\"\n if hasattr(x,\"__setitem__\"):\n if hasattr(x,\"keys\"):\n return \"DICT\"\n if hasattr(x,\"append\"):\n return \"LIST\"\n if hasattr(x,\"join\" ):\n return \"STR\"\n if hasattr(x,\"__add__\") and hasattr(x,\"__sub__\"):\n if hasattr(x,\"__and__\"):\n return \"INT\"\n else:\n return \"FLOAT\"\n if hasattr(x,\"url\") and hasattr(x,\"start_pt\"):\n return \"REF\"", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info" ]
[ "0.6431343", "0.58831644", "0.5787502", "0.57085586", "0.5668089", "0.56542784", "0.55438024", "0.5541255", "0.5535427", "0.5526042", "0.5480257", "0.54440296", "0.54340893", "0.5433637", "0.5404673", "0.5362585", "0.5343884", "0.5343884", "0.5334441", "0.5314773", "0.5311111", "0.53062487", "0.52925575", "0.5270048", "0.52558464", "0.5239609", "0.5222829", "0.5222549", "0.52211493", "0.52013195", "0.5193248", "0.5185836", "0.51835483", "0.5176396", "0.5173848", "0.5165932", "0.51484174", "0.5126722", "0.5120437", "0.511982", "0.51190794", "0.5111015", "0.5109953", "0.507839", "0.5061586", "0.504399", "0.5033799", "0.5023571", "0.5018484", "0.5017852", "0.5013395", "0.5012752", "0.5000383", "0.498739", "0.49774396", "0.49683625", "0.4952875", "0.49462575", "0.4945534", "0.4942513", "0.4931484", "0.49275717", "0.49225044", "0.4915425", "0.4901996", "0.49011382", "0.48998412", "0.48989785", "0.48977146", "0.48895106", "0.48833898", "0.48804715", "0.4879566", "0.48722747", "0.48695594", "0.48687518", "0.48654544", "0.4858935", "0.48533303", "0.48440036", "0.48436105", "0.4836338", "0.48337173", "0.48258495", "0.48197109", "0.4818269", "0.48176757", "0.4814429", "0.48136955", "0.48089993", "0.48081025", "0.48070186", "0.48062325", "0.48051625", "0.48016945", "0.47913873", "0.4787485", "0.47803906", "0.47782415", "0.4776153", "0.47687006" ]
0.0
-1
MD5based hashing algorithm used in consistent hashing scheme to compensate for servers added/removed from memcached pool.
def KETAMA(key):
    d = hashlib.md5(key).digest()
    c = _signed_int32
    h = c((ord(d[3])&0xff) << 24) | c((ord(d[2]) & 0xff) << 16) | \
        c((ord(d[1]) & 0xff) << 8) | c(ord(d[0]) & 0xff)
    return h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MD5(self) -> _n_0_t_3[_n_0_t_9]:", "def pool_hash(path_list):\n return pool_process(md5_tuple, path_list, 'MD5 hashing')", "def calc_md5(string):\n\treturn md5(string).hexdigest()", "def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()", "def md5hash(string):\n return hashlib.md5(string).hexdigest()", "def _md5(input):\n m = hashlib.md5()\n m.update(input)\n return m.hexdigest()", "def md_5_hash(i):\n h = hashlib.md5(i.encode('utf-8')).hexdigest()\n return h", "def __md5_hash(txt) -> str:\n\n return md5_crypt.hash(txt)", "def compute_md5_for_string(string):\n return base64.b64encode(hashlib.md5(string).digest())", "def md5(val):\n return hashlib.md5(val).hexdigest()", "def _compute_hal9000_md5(observable: Observable) -> str:\n md5_hasher = md5()\n md5_hasher.update(observable.type.encode('utf-8', errors='ignore'))\n md5_hasher.update(observable.value.encode('utf-8', errors='ignore'))\n return md5_hasher.hexdigest()", "def hash(self, string):\n h = md5()\n h.update(string)\n return h.digest()", "def get_md5(s):\n m = hashlib.md5()\n m.update(s.encode('utf8'))\n return m.hexdigest()", "def aws_md5(data):\n hasher = hashlib.new(\"md5\")\n if hasattr(data, \"read\"):\n data.seek(0)\n while True:\n chunk = data.read(8192)\n if not chunk:\n break\n hasher.update(chunk)\n data.seek(0)\n else:\n hasher.update(data)\n return b64encode(hasher.digest()).decode(\"ascii\")", "def get_md5(text):\n return hashlib.md5(text).hexdigest()", "def _md5sum(data):\n hash = hashlib.md5()\n hash.update(six.b(data))\n hash_hex = hash.hexdigest()\n return hash_hex", "def _hash_value(value):\n return hashlib.md5(value.encode('utf-8')).hexdigest()[:9]", "def md5hash(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"md5hash\")", "def HashAlgorithm(self) -> _n_7_t_0:", "def calc_md5(code):\n md5 = hashlib.md5()\n md5.update(code)\n return md5.hexdigest()", "def md5_hash(self) -> str:\n\n ordered_model_data = sort_dictionary(self.data, recursive=True)\n\n return md5(json.dumps(ordered_model_data).encode(\"utf-8\")).hexdigest()", "def my_md5(inp):\n # https://en.wikipedia.org/wiki/MD5#Pseudocode\n global s, K # `s` and `K` are global\n\n # Initialize variables\n a0 = 0x67452301 # A\n b0 = 0xefcdab89 # B\n c0 = 0x98badcfe # C\n d0 = 0x10325476 # D\n\n # Convert input string to bit string\n msg = ''.join(f'{ord(i):08b}' for i in inp)\n\n # append \"1\" bit to message\n msg += '1'\n\n # append \"0\" bit until message length in bits = 448 (mod 512)\n msg += '0'*(448 - len(msg))\n\n # append original length in bits mod 2**64 to message\n msg += '{0:064b}'.format(ch_endian64(len(inp)*8))\n\n assert len(msg) == 512\n\n # Process the message in successive 512-bit chunks:\n # for each 512-bit chunk of padded message do\n # break chunk into sixteen 32-bit words M[j], 0 <= j <= 15\n #\n # ~> We have 1 chunk, so no need for that\n\n # Initialize hash value for this chunk:\n A, B, C, D = a0, b0, c0, d0 \n b_values = []\n\n # Main loop:\n for i in range(64):\n if 0 <= i and i <= 15:\n F = (B & C) | (~B & D)\n g = i\n elif 16 <= i and i <= 31:\n F = (D & B) | (~D & C)\n g = (5*i + 1) % 16\n elif 32 <= i and i <= 47:\n F = B ^ C ^ D\n g = (3*i + 5) % 16\n elif 48 <= i <= 63:\n F = C ^ (B | ~D)\n g = (7*i) % 16\n\n F &= 0xFFFFFFFF\n\n inp_chunk = ch_endian(int(msg[32*g:32*g + 32], 2))\n\n # Be wary of the below definitions of a,b,c,d\n F = (F + A + K[i] + inp_chunk) & 0xFFFFFFFF # M[g] must be a 32-bits block\n A = D\n D = C\n C = B\n B = (B + rol(F, s[i])) & 
0xFFFFFFFF\n\n print(f'{i:2d}: A:{A:08X}, B:{B:08X}, C:{C:08X}, D:{D:08X} ~> g:{g} $ {inp_chunk:08X} $ X:{B & 0x3FF:03X}')\n\n b_values.append(B & 0x3FF) # Get the leak.\n\n # Add this chunk's hash to result so far:\n a0 = (a0 + A) & 0xFFFFFFFF\n b0 = (b0 + B) & 0xFFFFFFFF\n c0 = (c0 + C) & 0xFFFFFFFF\n d0 = (d0 + D) & 0xFFFFFFFF\n # end for\n\n a0 = ch_endian(a0)\n b0 = ch_endian(b0)\n c0 = ch_endian(c0)\n d0 = ch_endian(d0)\n\n print(f'{a0:08X}-{b0:08X}-{c0:08X}-{d0:08X}')\n \n # var char digest[16] := a0 append b0 append c0 append d0 // (Output is in little-endian)\n print(f'{a0:08x}{b0:08x}{c0:08x}{d0:08x}')\n\n return b_values", "def md5_sum(string):\n m = hashlib.md5()\n m.update(string.encode(\"utf-8\"))\n return m.hexdigest()", "def md5(input_string):\n return hashlib.md5(input_string.encode('utf-8')).hexdigest()", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def hashing(smiles):\n import hashlib\n hash_object = hashlib.md5(canonical_smiles_from_smiles(smiles).encode(\"utf-8\"))\n return hash_object.hexdigest()", "def _hash_5tuple(ip_A, ip_B, tp_src, tp_dst, proto):\n if ip_A > ip_B:\n direction = 1\n elif ip_B > ip_A:\n direction = 2\n elif tp_src > tp_dst:\n direction = 1\n elif tp_dst > tp_src:\n direction = 2\n else:\n direction = 1\n hash_5t = hashlib.md5()\n if direction == 1:\n flow_tuple = (ip_A, ip_B, tp_src, tp_dst, proto)\n else:\n flow_tuple = (ip_B, ip_A, tp_dst, tp_src, proto)\n flow_tuple_as_string = str(flow_tuple)\n hash_5t.update(flow_tuple_as_string)\n return hash_5t.hexdigest()", "def md5(string: str) -> str:\n\treturn str(hashlib.md5(string.encode()).hexdigest())", "def hash_obj(self, obj):\r\n md5er = hashlib.md5()\r\n update_hash(md5er, obj)\r\n return md5er.hexdigest()", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def calc_md5(s: Union[bytes, str]) -> str:\n h = hashlib.new(\"md5\")\n\n b = s.encode(\"utf-8\") if isinstance(s, str) else s\n\n h.update(b)\n return h.hexdigest()", "def md5(obj):\n import hashlib\n # print \"self.conf\", str(self.conf)\n # if type(obj) is not str:\n # obj = str(obj)\n # print('type(obj)', type(obj))\n m = hashlib.md5(obj.encode())\n return m", "def md5_sum(content):\r\n md5_hash = hashlib.md5(content).hexdigest()\r\n return md5_hash", "def count_md5hash_bytes(byte_flow):\n hash_md5 = hashlib.md5()\n hash_md5.update(byte_flow)\n return hash_md5.hexdigest()", "def vectorization_md5_hash(self):\n keys = sorted(pr.__dict__)\n keys.remove('threshold_config')\n keys.remove('threshold_center')\n return hashlib.md5(\n str([pr.__dict__[i] for i in keys]).encode()\n ).hexdigest()", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def md5Hash(pathAndFilename, blockSize=8192):\n hashcode = hashlib.md5()\n with open(pathAndFilename, \"rb\" ) as f:\n block = f.read(blockSize)\n while len(block)>0:\n hashcode.update(block)\n block = f.read(blockSize)\n return hashcode.hexdigest()", "def hash_password(password):\n password_md5 = hashlib.md5(password.encode('utf-8')).hexdigest()\n for i in range(0, len(password_md5), 2):\n if password_md5[i] == '0':\n password_md5 = password_md5[0:i] + 'c' + password_md5[i + 1:]\n return password_md5", "def get_md5(self, line):\n m = 
hashlib.md5()\n m.update(str(line).encode('utf-8'))\n return m.hexdigest()", "def md5_hash(file_path):\n with open(file_path, 'rb') as fp:\n return md5(fp.read()).hexdigest()", "def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]", "def get_hash(input, method='md5', salt=settings.SECRET_KEY):\n h = hashlib.new(method)\n h.update(str(input))\n h.update(salt)\n return h.hexdigest()", "def string_to_md5(content):\n return hashlib.md5(content).hexdigest()", "def md5(s: str) -> str:\n return hashlib.md5(s.encode()).hexdigest()", "def default_md5(key: KeyT, *args, **kwargs) -> bytes:\n return md5(key).digest() # type: ignore", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def get_md5(f: BinaryIO) -> str:\n BLOCKSIZE = 65536\n hasher = hashlib.md5()\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(BLOCKSIZE)\n return hasher.hexdigest()", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def md5(s1):\n s = str(s1)\n h1 = hashlib.md5()\n h1.update(s.encode(encoding='utf-8'))\n s = h1.hexdigest()\n return s", "def apache_md5crypt(password, salt, magic='$apr1$'):\n # /* The password first, since that is what is most unknown */ /* Then our magic string */ /* Then the raw salt */\n import md5\n m = md5.new()\n m.update(password + magic + salt)\n\n # /* Then just as many characters of the MD5(pw,salt,pw) */\n mixin = md5.md5(password + salt + password).digest()\n for i in range(0, len(password)):\n m.update(mixin[i % 16])\n\n # /* Then something really weird... */\n # Also really broken, as far as I can tell. 
-m\n i = len(password)\n while i:\n if i & 1:\n m.update('\\x00')\n else:\n m.update(password[0])\n i >>= 1\n\n final = m.digest()\n\n # /* and now, just to make sure things don't run too fast */\n for i in range(1000):\n m2 = md5.md5()\n if i & 1:\n m2.update(password)\n else:\n m2.update(final)\n\n if i % 3:\n m2.update(salt)\n\n if i % 7:\n m2.update(password)\n\n if i & 1:\n m2.update(final)\n else:\n m2.update(password)\n\n final = m2.digest()\n\n # This is the bit that uses to64() in the original code.\n\n itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n\n rearranged = ''\n for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):\n v = ord(final[a]) << 16 | ord(final[b]) << 8 | ord(final[c])\n for i in range(4):\n rearranged += itoa64[v & 0x3f]; v >>= 6\n\n v = ord(final[11])\n for i in range(2):\n rearranged += itoa64[v & 0x3f]; v >>= 6\n\n return magic + salt + '$' + rearranged", "def _get_local_md5(self, blocksize=2**20):\n m = hashlib.md5()\n with open(self.dst, \"rb\") as f:\n buf = f.read(blocksize)\n while buf:\n m.update(buf)\n buf = f.read(blocksize)\n return m.hexdigest()", "def crack_md5(cand_len, b_values):\n global s, K # `s` and `K` are global\n\n slv = z3.Solver()\n \n inp = [z3.BitVec(f'inp_{i}', 32) for i in range(16)]\n\n add_inp_constraint(cand_len, inp, slv)\n\n # MD5 implementation using symbolic variables.\n a0 = 0x67452301 # A\n b0 = 0xefcdab89 # B\n c0 = 0x98badcfe # C\n d0 = 0x10325476 # D\n\n A, B, C, D = a0, b0, c0, d0\n \n for i in range(64):\n if 0 <= i and i <= 15:\n F = (B & C) | (~B & D)\n g = i\n elif 16 <= i and i <= 31:\n F = (D & B) | (~D & C)\n g = (5*i + 1) % 16\n elif 32 <= i and i <= 47:\n F = B ^ C ^ D\n g = (3*i + 5) % 16\n elif 48 <= i <= 63:\n F = C ^ (B | ~D)\n g = (7*i) % 16\n\n F &= 0xFFFFFFFF\n F = (F + A + K[i] + inp[g]) & 0xFFFFFFFF \n A = D\n D = C\n C = B\n\n # NOTE: rol DOES NOT WORK! 
WE HAVE TO USE z3's `RotateLeft`.\n B = (B + z3.RotateLeft(F, s[i])) & 0xFFFFFFFF\n\n slv.add(B & 0x3FF == b_values[i])\n\n \n # Check for solutions\n def to_ascii(x):\n return chr(x & 0xFF) + chr((x >> 8) & 0xFF) + chr((x >> 16) & 0xFF) + chr(x >> 24)\n\n while slv.check() == z3.sat:\n mdl = slv.model()\n\n print('[+] Solution FOUND!')\n \n flag = ''\n for i, j in enumerate(inp):\n yy = mdl.evaluate(j).as_long() \n print(f'[+] {i:2d} ~~> {yy:08X} ~~> {repr(to_ascii(yy))}')\n flag += to_ascii(yy)\n\n flag = flag[:cand_len]\n\n print('[+] FLAG IS: hxp{%s}' % flag)\n return 1\n else:\n print('[+] Cannot find satisfiable solution :\\\\')\n return -1", "def md5(fname):\n hash = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash.update(chunk)\n return hash.hexdigest()", "def digest(string):\n return md5(string.encode(\"utf-8\")).hexdigest()", "def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()", "def create_hash(tree_string):\n return hashlib.md5(tree_string.encode()).hexdigest()", "def python_repo_hash_md5(root_dir: str, *, verbose: bool = False):\n m = hashlib.md5()\n for e in _collect_entries(root_dir, '.'):\n if verbose:\n log_info('Processing e', e)\n m.update(\n f\"path={e['path']}\\tisdir={e['isdir']}\\tsize={e['size']}\\tmode={e['mode']:03o}\\tmtime={e['mtime']}\\n\"\n .encode('UTF-8'))\n\n return m.hexdigest()", "def md5(filename: str) -> str:\n # using md5 for speed\n _hash = hashlib.md5()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n for block in iter(lambda: file.read(1024), b\"\"):\n _hash.update(block)\n return _hash.hexdigest()", "def md5(self):\n\t\tfrom utils import get_md5\n\t\t# from hashlib import md5\n\t\t# m = md5()\n\t\t# m.update(str(self.html))\n\t\t# return m.hexdigest()\n\t\treturn get_md5(str(self.html))", "def local_md5(filepath, blocksize=65536):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as source:\n buf = source.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = source.read(blocksize)\n return hasher.hexdigest()", "def hashLink(link):\n\n return str(md5.new(link).hexdigest())[:5]", "def md5_hexdigest(data):\n\n if not (data and isinstance(data, six.text_type)):\n raise Exception(\"invalid data to be hashed: %s\", repr(data))\n\n encoded_data = data.encode(\"utf-8\")\n\n if not new_md5:\n m = md5.new() # nosec\n else:\n m = md5()\n m.update(encoded_data)\n\n return m.hexdigest()", "def calculate_hash_id(self):\n return get_md5_hash(f'{self.type}{self.get_primary_id()}')", "def get_md5(string):\r\n byte_string = string.encode(\"utf-8\")\r\n md5 = hashlib.md5()\r\n md5.update(byte_string)\r\n result = md5.hexdigest()\r\n return 'M'+result", "def hash_file_md5(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.md5, binary=binary, buffer_size=buffer_size)", "def md5(self):\n return self.tag(\"md5\")", "def get_md5(self):\n self.md5sum = ''\n return self.md5sum", "def md5_hash(cls, origin, salt_prefix, salt_suffix, encoding: str = 'utf-8',\n\t\t\t\t double_md5: bool = False) -> str:\n\t\tif double_md5:\n\t\t\torigin = cls.md5_hash(origin, salt_prefix=salt_prefix, salt_suffix=salt_suffix, encoding=encoding,\n\t\t\t\t\t\t\t\t double_md5=False)\n\t\tmd5_hashed_obj = hashlib.md5(salt_prefix.encode(encoding) if type(salt_prefix) is str else salt_prefix)\n\t\tmd5_hashed_obj.update(origin.encode(encoding) if type(origin) is str else 
origin)\n\t\tmd5_hashed_obj.update(salt_suffix.encode(encoding) if type(salt_suffix) is str else salt_suffix)\n\n\t\treturn md5_hashed_obj.hexdigest()", "def _get_hasher(self):\n import hashlib\n\n # Try making the hash set from the columns marked 'hash'\n indexes = [i for i, c in enumerate(self.columns) if\n c.data.get('hash', False) and not c.is_primary_key]\n\n # Otherwise, just use everything by the primary key.\n if len(indexes) == 0:\n indexes = [\n i for i,\n c in enumerate(\n self.columns) if not c.is_primary_key]\n\n def hasher(values):\n m = hashlib.md5()\n for index in indexes:\n x = values[index]\n try:\n m.update(\n x.encode('utf-8') +\n '|') # '|' is so 1,23,4 and 12,3,4 aren't the same\n except:\n m.update(str(x) + '|')\n return int(m.hexdigest()[:14], 16)\n\n return hasher", "def create_config_hash(config):\n value_str = \"\"\n for section in config.sections:\n for key in section.keys():\n value_str += str(config[section][key])\n value_hash = hashlib.md5(value_str.encode('utf-8')).hexdigest()\n\n return value_hash", "def calc_file_md5(file_path):\n hash_md5 = str()\n method = hashlib.md5()\n if not os.path.exists(file_path):\n logger.error(\"File(%s) don not exist, can not calculation file hash\" % file_path)\n return hash_md5\n\n with open(file_path, 'rb') as f:\n for chunk in read_chunks(f, 1024 * 1024):\n method.update(chunk)\n return method.hexdigest()", "def hash_password(password):\n return hashlib.md5(password).hexdigest()", "def gen_hash(s: str) -> str:\n\n m = hashlib.md5()\n m.update(bytes(s, encoding = 'utf8'))\n hash_code = str(m.hexdigest())\n\n return hash_code", "def md5hash(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"md5hash\")", "def md5(file_path):\r\n\r\n hasher = hashlib.md5()\r\n with Profiler():\r\n with open(file_path, 'rb') as f:\r\n while True:\r\n buf = f.read(BLOCKSIZE)\r\n if not buf:\r\n break\r\n while len(buf) > 0:\r\n hasher.update(buf)\r\n buf = f.read(BLOCKSIZE)\r\n md5_hash = (hasher.hexdigest()).upper()\r\n return md5_hash", "def rss_md5(string):\r\n if not isinstance(string, basestring):\r\n try: string = string.decode('utf8','replace')\r\n except: pass\r\n md5 = hashlib.md5()\r\n md5.update(string.encode('utf8'))\r\n return md5.hexdigest()", "def hash(self) -> bytes:", "def hash(self) -> str:\n return md5(bytes(self.url, encoding=\"utf8\")).hexdigest()", "def hash(self) -> str:\r\n ...", "def _calculate_link_hash(links):\n to_hash = ''.join(sorted(links.keys()))\n # Hashlib takes encoded Strings, not Unicode objects\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def _create_md5(self, password) -> str:\n md5_hash = hashlib.md5(password.encode(\"utf-8\")).hexdigest()\n self.logger.debug(\"created md5 hash: %s\", md5_hash)\n\n return md5_hash", "def getmd5(image: Image):\n return hashlib.md5(image.tobytes()).hexdigest()", "def md5(path):\n with open(path, 'rb') as f:\n md5hash = hashlib.md5()\n for chunk in iter(lambda: f.read(4096), b''):\n md5hash.update(chunk)\n return md5hash.hexdigest()", "def md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def compute_gzip_md5(fqfn):\n md5 = hashlib.md5()\n file_obj = gzip.open(fqfn, 'rb')\n for chunk in iter(lambda: file_obj.read(8192), ''):\n md5.update(chunk)\n\n file_obj.close()\n return md5.hexdigest()", "def hash(x):\r\n return (randint(1,5*c)*x + randint(1,5*c))%c", "def md5crypt(password: bytes, salt: Optional[bytes] = None, 
magic: bytes = b\"$1$\") -> bytes:\n password = smart_bytes(password)\n magic = smart_bytes(magic)\n salt = smart_bytes(salt) if salt else gen_salt(8)\n # /* The password first, since that is what is most unknown */ /* Then our magic string */ /* Then the raw salt */\n m = hashlib.md5(smart_bytes(password + magic + salt))\n # /* Then just as many characters of the MD5(pw,salt,pw) */\n mixin = hashlib.md5(smart_bytes(password + salt + password)).digest()\n for i in range(len(password)):\n m.update(bytes([mixin[i % 16]]))\n # /* Then something really weird... */\n # Also really broken, as far as I can tell. -m\n i = len(password)\n while i:\n if i & 1:\n m.update(b\"\\x00\")\n else:\n m.update(bytes([password[0]]))\n i >>= 1\n final = m.digest()\n # /* and now, just to make sure things don't run too fast */\n for i in range(1000):\n m2 = hashlib.md5()\n if i & 1:\n m2.update(smart_bytes(password))\n else:\n m2.update(smart_bytes(final))\n if i % 3:\n m2.update(smart_bytes(salt))\n if i % 7:\n m2.update(smart_bytes(password))\n if i & 1:\n m2.update(smart_bytes(final))\n else:\n m2.update(smart_bytes(password))\n final = m2.digest()\n # This is the bit that uses to64() in the original code.\n rearranged = []\n for a, b, c in REARRANGED_BITS:\n v = final[a] << 16 | final[b] << 8 | final[c]\n for i in range(4):\n rearranged += [ITOA64[v & 0x3F]]\n v >>= 6\n v = final[11]\n for i in range(2):\n rearranged += [ITOA64[v & 0x3F]]\n v >>= 6\n return magic + salt + b\"$\" + bytes(rearranged)", "def md5_of_bytes(data: bytes) -> bytes:\n return hashlib.md5(data).digest()", "def useOldServerHashFunction():\r\n global serverHashFunction\r\n serverHashFunction = crc32", "def secret_hash(data):\n\n passwords_hash = hashlib.md5(data.encode(\"UTF-8\")).hexdigest()\n \n return passwords_hash", "def hash_file(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def md5(fname):\n\t\n\thash_md5 = hashlib.md5()\n\twith open(fname, \"rb\") as f:\n\t\tfor chunk in iter(lambda: f.read(4096), b\"\"):\n\t\t\thash_md5.update(chunk)\n\t\n\treturn hash_md5.hexdigest()", "def fasthash(string):\r\n md4 = hashlib.new(\"md4\")\r\n md4.update(string)\r\n return md4.hexdigest()", "def chunkedmd5(filename,csize=8192):\n md5=hashlib.md5()\n with open(filename,'rb') as f:\n for chunk in iter(lambda: f.read(csize), b''):\n md5.update(chunk)\n return md5.digest().encode('hex')", "def get_md5_from_hexdigest(self, md5_hexdigest):\r\n import binascii\r\n digest = binascii.unhexlify(md5_hexdigest)\r\n base64md5 = base64.encodestring(digest)\r\n if base64md5[-1] == '\\n':\r\n base64md5 = base64md5[0:-1]\r\n return (md5_hexdigest, base64md5)", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def hash_functions(self):\n pass", "async def get_hash(identifier):\n return hashlib.md5(identifier.encode('utf8')).hexdigest()", "def md5sum_file(filepath):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as infile:\n for chunk in util.chunk_reader(infile):\n hasher.update(chunk)\n return hasher.hexdigest()", "def __serialize_md5(self, md5):\n if md5:\n return md5.hexdigest()\n else:\n return None" ]
[ "0.7700844", "0.7656716", "0.7500732", "0.74877924", "0.7479576", "0.7474418", "0.725043", "0.7211859", "0.7142206", "0.71216583", "0.7043272", "0.7021252", "0.6984938", "0.69776475", "0.6957162", "0.6938334", "0.69136685", "0.69106406", "0.69105786", "0.6903468", "0.6874061", "0.6865856", "0.6858436", "0.6847527", "0.68323195", "0.682124", "0.6820859", "0.67840606", "0.6777261", "0.6776364", "0.67534775", "0.6749495", "0.67484725", "0.67482954", "0.6727948", "0.6713309", "0.6713309", "0.67131627", "0.6668293", "0.6649457", "0.66408396", "0.66399354", "0.6623188", "0.6619729", "0.6619099", "0.6615188", "0.6615125", "0.6606439", "0.65554506", "0.6547452", "0.65362954", "0.6529255", "0.6526905", "0.6520618", "0.6511394", "0.6507779", "0.65043443", "0.6493799", "0.649264", "0.6480343", "0.6466759", "0.6455888", "0.6455212", "0.6454896", "0.64508784", "0.6450097", "0.6436618", "0.64360654", "0.6412427", "0.64103156", "0.6402249", "0.6397002", "0.6361396", "0.63591516", "0.6351817", "0.63479906", "0.6340817", "0.6336628", "0.63276756", "0.6325412", "0.63177806", "0.63158524", "0.6312058", "0.63077307", "0.62942547", "0.6287708", "0.6265557", "0.6261931", "0.62476605", "0.62441593", "0.6237428", "0.62201107", "0.62145513", "0.62080413", "0.62060994", "0.61910915", "0.6182189", "0.6179979", "0.6166684", "0.6154111", "0.6153233" ]
0.0
-1
Reading all conversations from the database
def find_all(self): result = [] cursor = self._cnx.cursor() command = "SELECT id, konversation, nachricht_id, teilnehmer, herkunfts_id, ziel_id, inhalt FROM konversationen" cursor.execute(command) tuples = cursor.fetchall() for (id, konversation, nachricht_id, teilnehmer, herkunfts_id, ziel_id, inhalt) in tuples: konversation = Konversation() konversation.set_id(id) konversation.set_konversation(konversation) konversation.set_nachricht_id(nachricht_id) konversation.set_teilnehmer(teilnehmer) konversation.set_herkunfts_id(herkunfts_id) konversation.set_ziel_id(ziel_id) konversation.set_inhalt(inhalt) result.append(konversation) self._cnx.commit() cursor.close() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text_typing_block(self):\n\t # open the database using the masterpassword\n typing_text = 'Send({})\\n'.format(self.masterpassword)\n\n # add in exit - this is achieved using CTRL + q\n typing_text += 'Sleep(15677)\\n'\n typing_text += \"SendKeepActive('KeePass')\\n\"\n typing_text += 'Send(\"^q\")\\n'\n typing_text += \"; Reset Focus\\n\"\n typing_text += 'SendKeepActive(\"\")'\n\n return textwrap.indent(typing_text, self.indent_space)", "def text(message, user=None):\n\n def clear_status(user_id):\n db.set_user_status(0, user_id)\n return True\n\n if not user:\n teleBot.send_message(message.chat.id, \"Для использования бота необходимо зарегистрироваться /start.\")\n return\n\n if user.status < 1:\n if user.status == -222:\n teleBot.send_message(message.chat.id, \"Спасибо, ваша жалоба будет рассмотрена в ближайшее время.\")\n teleBot.send_message(\"337804063\", f\"Пользователь {message.chat.id} отправил жалобу: {message.text}\")\n db.set_user_status(0, user.id)\n return\n\n if user.status == -1:\n site = db.new_site(user.id, message.text)\n # href = 'https://0fc752a06314.ngrok.io'\n href = ServerConfiguration.HOST\n db.select_site(user.id, site.id)\n teleBot.send_message(message.chat.id,\n f\"Создан сайт {site.title}. Вот ссылочка: {href}/site/{site.slug}\",\n reply_markup=ACTION_KEYBOARD)\n db.set_user_status(0, user.id)\n return\n teleBot.send_message(message.chat.id, \"И куда это записывать? Выберите действие! \")\n return\n\n column = code_to_action(user.status)\n teleBot.send_message(message.chat.id, f\"Успешно! Можете вызвать меню /set ещё раз или \"\n f\"воспользоваться предыдущим для продолжения работы с сайтом \")\n\n if column == -1 or not column:\n teleBot.send_message(message.chat.id, \"Неизвестная ошибка сервера.\")\n return\n\n def prepear_text(text):\n return text.replace('$', ':dol:')\n\n data = {\n column: prepear_text(message.text),\n 'last_update': datetime.now()\n }\n\n db.update_site_data(user.selected, **data)\n\n # teleBot.send_message(message.chat.id)", "def text_e(self, event):\n directory=os.getcwd()+ '/messages'\n filename=str(self.user)+'_'+str(self.friend)\n text = self.text_send.GetValue()\n messages = mf.addMessage(self.user, self.friend, self.passw, text)\n mf.makeTextFile(self.user, self.friend, self.passw, messages)\n \n self.chat_log.LoadFile('/'.join((directory, filename)))\n self.text_send.SetValue(\"\")\n event.Skip()", "async def textemote(self, ctx, *, msg):\n try:\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n\n if msg != None:\n out = msg.lower()\n text = out.replace(' ', ' ').replace('10', '\\u200B:keycap_ten:')\\\n .replace('ab', '\\u200B🆎').replace('cl', '\\u200B🆑')\\\n .replace('0', '\\u200B:zero:').replace('1', '\\u200B:one:')\\\n .replace('2', '\\u200B:two:').replace('3', '\\u200B:three:')\\\n .replace('4', '\\u200B:four:').replace('5', '\\u200B:five:')\\\n .replace('6', '\\u200B:six:').replace('7', '\\u200B:seven:')\\\n .replace('8', '\\u200B:eight:').replace('9', '\\u200B:nine:')\\\n .replace('!', '\\u200B❗').replace('?', '\\u200B❓')\\\n .replace('vs', '\\u200B🆚').replace('.', '\\u200B🔸')\\\n .replace(',', '🔻').replace('a', '\\u200B🅰')\\\n .replace('b', '\\u200B🅱').replace('c', '\\u200B🇨')\\\n .replace('d', '\\u200B🇩').replace('e', '\\u200B🇪')\\\n .replace('f', '\\u200B🇫').replace('g', '\\u200B🇬')\\\n .replace('h', '\\u200B🇭').replace('i', '\\u200B🇮')\\\n .replace('j', '\\u200B🇯').replace('k', '\\u200B🇰')\\\n .replace('l', '\\u200B🇱').replace('m', '\\u200B🇲')\\\n .replace('n', '\\u200B🇳').replace('ñ', 
'\\u200B🇳')\\\n .replace('o', '\\u200B🅾').replace('p', '\\u200B🅿')\\\n .replace('q', '\\u200B🇶').replace('r', '\\u200B🇷')\\\n .replace('s', '\\u200B🇸').replace('t', '\\u200B🇹')\\\n .replace('u', '\\u200B🇺').replace('v', '\\u200B🇻')\\\n .replace('w', '\\u200B🇼').replace('x', '\\u200B🇽')\\\n .replace('y', '\\u200B🇾').replace('z', '\\u200B🇿')\n try:\n await ctx.send(text)\n except Exception as e:\n await ctx.send(f'```{e}```')\n else:\n await ctx.send('Args req!', delete_after=3.0)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "async def textemote(self, ctx, *, msg):\n await ctx.message.delete()\n if msg != None:\n out = msg.lower()\n text = out.replace(' ', ' ').replace('10', '\\u200B:keycap_ten:')\\\n .replace('ab', '\\u200B🆎').replace('cl', '\\u200B🆑')\\\n .replace('0', '\\u200B:zero:').replace('1', '\\u200B:one:')\\\n .replace('2', '\\u200B:two:').replace('3', '\\u200B:three:')\\\n .replace('4', '\\u200B:four:').replace('5', '\\u200B:five:')\\\n .replace('6', '\\u200B:six:').replace('7', '\\u200B:seven:')\\\n .replace('8', '\\u200B:eight:').replace('9', '\\u200B:nine:')\\\n .replace('!', '\\u200B❗').replace('?', '\\u200B❓')\\\n .replace('vs', '\\u200B🆚').replace('.', '\\u200B🔸')\\\n .replace(',', '🔻').replace('a', '\\u200B🅰')\\\n .replace('b', '\\u200B🅱').replace('c', '\\u200B🇨')\\\n .replace('d', '\\u200B🇩').replace('e', '\\u200B🇪')\\\n .replace('f', '\\u200B🇫').replace('g', '\\u200B🇬')\\\n .replace('h', '\\u200B🇭').replace('i', '\\u200B🇮')\\\n .replace('j', '\\u200B🇯').replace('k', '\\u200B🇰')\\\n .replace('l', '\\u200B🇱').replace('m', '\\u200B🇲')\\\n .replace('n', '\\u200B🇳').replace('ñ', '\\u200B🇳')\\\n .replace('o', '\\u200B🅾').replace('p', '\\u200B🅿')\\\n .replace('q', '\\u200B🇶').replace('r', '\\u200B🇷')\\\n .replace('s', '\\u200B🇸').replace('t', '\\u200B🇹')\\\n .replace('u', '\\u200B🇺').replace('v', '\\u200B🇻')\\\n .replace('w', '\\u200B🇼').replace('x', '\\u200B🇽')\\\n .replace('y', '\\u200B🇾').replace('z', '\\u200B🇿')\n try:\n await ctx.send(text)\n except Exception as e:\n await ctx.send(f'```{e}```')\n else:\n await ctx.send('Args req!', delete_after=3.0)", "def cipher_feedback(self):", "def horde_message(self, message):", "def test_to_text(self):\n tkeyring = dns.tsigkeyring.to_text(rich_keyring)\n self.assertEqual(tkeyring, text_keyring)", "def send_text_to_user(user):", "def absenden(self):\n\n message = self.textFeld.toPlainText()\n self.c.send(message)\n self.textFeld.clear()", "def get_keys(self, update, context):\r\n self.SECRET_KEY = update.message.text\r\n update.message.reply_text(text=f'Новый ключ: {self.SECRET_KEY}')\r\n return ConversationHandler.END", "def list_messages(self):", "def save(self, *args, **kwargs):\n data = self.cleaned_data #Gets the data from the form, stores it as a dict\n allUsers = Bruker.get_all_dict(Bruker)\n mottaker = allUsers[int(data['mottaker'])]\n melding = Messages(content=data['content'], author=self.getUser(), receiver=mottaker)\n melding.save()", "def getText(self):", "def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) 
# new line\n else:\n self.cmd(ord(char),True)", "def listener(messages):\n for m in messages:\n chatid = m.chat.id\n print(str(chatid))\n if m.content_type == 'text':\n text = m.text\n tb.send_message(chatid, text)", "def message(self, text):\n\n if type(text) in (bytes, str):\n T = text\n else:\n # list probably:\n T = '\\n'.join(text)\n print(('-'*60))\n print(T)\n print(('='*60))", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "async def printtext(self, ctx: discord.ext.commands.Context, *args):\n message_channel: discord.abc.Messageable = ctx.message.channel\n if len(args) == 1:\n received_string = args[0]\n if received_string.startswith('\"') and received_string.endswith('\"'):\n received_string = received_string[1:-1]\n pos = received_string.find(\"\\\\\")\n if pos != -1 and received_string[pos + 1] != \" \":\n print(\"Error:\" + received_string[pos + 1])\n return\n pos = received_string.find(\"\\\"\")\n if pos != -1:\n print(\"Error:\" + received_string[pos + 1])\n return\n final_string = \"\"\n number_emoji = self.botVariables.numbers_emoji\n for c in received_string:\n if c.isalnum():\n try:\n val = int(c)\n if val < 10:\n final_string += number_emoji[val] + \" \"\n else:\n print(\"fatal Error!!!-\" + str(val))\n except ValueError:\n c = c.lower()\n if c == \"è\" or c == \"é\" or c == \"à\" or c == \"ù\" or c == \"ì\":\n final_string += c + \" \"\n else:\n final_string += \":regional_indicator_\" + c + \":\" + \" \"\n else:\n if c == \"!\" or c == \"?\" or c == \"#\":\n if c == \"!\":\n final_string += \":exclamation:\" + \" \"\n else:\n if c == \"#\":\n final_string += \":hash:\" + \" \"\n else:\n final_string += \":question:\" + \" \"\n else:\n final_string += c + \" \"\n await message_channel.send(final_string)\n else:\n await message_channel.send(\n \"**Usage:** \" + self.command_prefix + \"printtext \\\"phrase\\\", for more see \"\n + self.command_prefix + \"help printtext\")", "def devMsg(self, text):\n # Preprocess text\n lines = text.splitlines()\n\n image = self.devMsgImage.copy()\n draw = ImageDraw.Draw(image)\n # Text\n x0 = 0\n y0 = -2\n for i in range(0,len(lines)):\n draw.text((x0, y0+i*7), lines[i], font=self.font, fill=255)\n self.disp.image(image.rotate(180))\n self.disp.display()\n return", "def message(self, text):\n lines = str(text).split('\\n') # Split at newline(s)\n for i, line in enumerate(lines): # For each substring...\n if i > 0: # If newline(s),\n self.write_lcd(self.LCD_DATA_E1, 0xC0) # set DDRAM address to 2nd line\n self.write_lcd(self.LCD_DATA_E1, line, True) # Issue substring", "def comsume_msg(self, msg_type):", "def tag(self, sent):\n # WORK HERE!!", "def test_old_to_text(self):\n tkeyring = dns.tsigkeyring.to_text(old_rich_keyring)\n self.assertEqual(tkeyring, old_text_keyring)", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n 
strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def characters(self, data):\n pass", "async def 서버(self, ctx):\n if isinstance(ctx.channel, discord.DMChannel) or ctx.guild.id != 749595288280498188:\n return await ctx.send(f\"**여기로! {ctx.author.name} 🍻\\n<{self.config.botserver}>**\")\n\n await ctx.send(f\"**{ctx.author.name}** 이게 제 집이잖아요~ :3\")", "def getmessage(self, update, context):\r\n\r\n redirect_uri = \"https://thawing-ridge-47246.herokuapp.com\"\r\n\r\n # настройка соединения\r\n flow = Flow.from_client_secrets_file(\r\n 'credentials.json',\r\n scopes=SCOPES,\r\n redirect_uri=redirect_uri)\r\n\r\n code = self.get_code()\r\n\r\n flow.fetch_token(code=code, code_verifier=\"111\") # устанавливаем соединение с гуглом\r\n\r\n session = flow.authorized_session() # создаем сессию\r\n response = session.get('https://www.googleapis.com/gmail/v1/users/me/messages').json() # формируем запрос и получаем ответ сервера\r\n\r\n messages = response[\"messages\"]\r\n\r\n # у каждого из сообщений достаем id\r\n for message in messages[0:10]:\r\n mid = message['id']\r\n\r\n # получаем сообщение по id\r\n message_message = session.get(f'https://www.googleapis.com/gmail/v1/users/me/messages/{mid}').json()\r\n\r\n # информация об отправителе, получателе и теме сообщения хранится в ключе 'payload' --> 'headers'\r\n headers = message_message['payload']['headers']\r\n\r\n from_who = None\r\n to_whom = None\r\n subject = None\r\n\r\n for item in headers:\r\n if item['name'] == 'From':\r\n from_who = item['value']\r\n elif item['name'] == 'To':\r\n to_whom = item['value']\r\n elif item['name'] == 'Subject':\r\n subject = item['value']\r\n\r\n # ищем текст сообщения\r\n # достаем из сообщения его части\r\n message_payload_parts = message_message['payload']['parts']\r\n zero_part = message_payload_parts[0]\r\n\r\n if zero_part['mimeType'] == 'text/plain':\r\n self.message_without_attachments(context, message_payload_parts, from_who, to_whom, subject)\r\n elif zero_part['mimeType'] == 'multipart/alternative':\r\n self.message_with_attachments(session, mid, context, zero_part, message_payload_parts, from_who,\r\n to_whom, subject)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'Done.')", "def message(self, key):\n msg = '[ensime] ' + feedback[key]\n self.raw_message(msg)", "def get_text(self):", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='subject:tradingview')\n # print (lists)\n mes,mes_str = GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n print (mes)\n\n\n j = 0\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('hhhhhhhhhhhhh')\n elif contentType == 'text/plain' or contentType == 'text/html': \n #保存正文 \n data = part.get_payload(decode=True) \n content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n contentTxt = re.compile('<[^>|a]+>').sub('',content)\n print (reg.sub('',content))\n #end if \n\n\n \n # help(mes)\n # for i in mes.values():\n # print (i)\n # # print (mes[i]);\n # 
print (\"----------\")\n # print (mes['from'])\n # print (type (mes))\n # # print \n # parsed = Parser().parsestr(mes)\n # print (parsed)\n # print (mes)\n # for i in mes:\n # print (i)\n # for item in lists:\n # mes = GetMimeMessage(service,user_id = 'me',msg_id = item['id'])\n # # print (mes)\n # parsed = Parser().parsestr(mes)\n # print (parsed)", "def __message_content__(self) -> MessageContent:", "def cmd(self, message):\n pass", "def tweet(msg):\r\n m = \"\\n{}\\n\".format(msg)\r\n arcpy.AddMessage(m)\r\n print(m)\r\n print(arcpy.GetMessages())", "def on_accept(self, update, _context):\n self.updater.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Alege timpul\",\n reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard_first_responses()),\n )", "def controls(email):", "def regular_choice(update: Update, context: CallbackContext) -> int:\n expected = ['Public_key', 'Quantity', 'Secret_Key', 'Note']\n text = update.message.text\n for b in expected:\n if text == b:\n user_d[b] = text\n update.message.reply_text(f'Enter {text.lower()}?')\n\n return TYPING_REPLY", "def reveal(self):\n content = self.password.get()\n \n if content == \"password\":\n message = \"You have access to something special.\"\n else:\n message = \"Access denied.\"\n \n self.text.delete(0.0, END) # delete previous message\n self.text.insert(0.0, message) #insert your message at position row 0, colomn 0", "def handle_message(self, message):", "def tweet(msg):\n m = \"\\n{}\\n\".format(msg)\n arcpy.AddMessage(m)\n print(m)\n print(arcpy.GetMessages())", "def on_typing_pm(self, data):\n # data[4] = ?\n print ('%s is typing a private message.' % data[3])", "def hack_message(self):\r\n\t\t#Will not let user input useless messages that cannot be hacked.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to hack. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\t\t\t\r\n\t\tmax_key = len(self.message)\r\n\t\tself.i = 1\r\n\t\tpotential_hits = []\r\n\t\t#Runs through all potential keys. 
\r\n\t\tfor self.i in range(1, max_key):\r\n\t\t\tprint(f\"Trying key #{self.i}\")\t\t\t\r\n\t\t\tself.my_code = Decryptor(self.message, self.i).transfer_decrypt()\r\n\t\t\tself.hack_plausible = False\r\n\t\t\tself.verify_hack_key()\r\n\t\t\tif self.hack_plausible:\r\n\t\t\t\tpotential_hits.append(f\"Key #{self.i} yeilded {self.percent_english}% english words after decryption.\\n\" + \"\\t\" + self.my_code[:50])\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Hacking results:\\n\")\r\n\t\tfor hit in potential_hits:\r\n\t\t\tprint(\"\\t\" + hit + \"|\\n\")", "def dothis(message) -> str:\n system: ChatSystem = message.cls.main_system\n session = system.db_session.create_session()\n was = message.get_setting(session, 'news')\n was_set = literal_eval(was.value) if was else set()\n tags = set(message.params)\n if message.params:\n if message.params[0].isdigit():\n n = int(message.params[0])\n le = 0\n ans = ''\n for i, item in enumerate(system.module_news):\n item_lower = item[0].lower()\n if not tags or (item[1] in tags or any(\n map(lambda x: x in item_lower, tags))):\n le1 = len(item[0])\n if le1 + le > 4096:\n return ans\n elif i not in was_set:\n was_set.add(i)\n ans += item[0]\n le += le1\n break\n if not ans:\n return 'Ничего не найдено'\n if was:\n was.value = str(was_set)\n session.commit()\n else:\n message.add_setting(session, 'news', str(was_set))\n return ans", "def _txt_record(self, name, content):\n\n return {\n \"name\": name,\n \"type\": \"TXT\",\n \"aux\": None,\n \"ttl\": MetanameApiClient.minimum_ttl,\n \"data\": content,\n }", "def list_atms(bot, update, chat_data):\n\n\tchat_id = update.message.chat_id\n\tchat_data[chat_id] = {'command': update.message.text, 'location':{}}\n\treply_markup = telegram.ReplyKeyboardMarkup([[telegram.KeyboardButton('Enviar Ubicación', request_location=True)]])\n\tbot.sendMessage(chat_id, 'Por favor, envie su ubicación', reply_markup=reply_markup)", "def receive_message(self, message):", "def botones_especiales():\n texto = '• CONFIRMAR: Una vez colocada una palabra sobre el tablero, verifica si esa \\n'+\\\n 'palabra es válida, en caso de serlo se sumarán puntos al puntaje del jugador,\\n'+\\\n 'de lo contrario las fichas usadas volverán al atril.\\n'+\\\n '\\n• DESHACER: Permite devolver al atril las fichas que se hayan puesto en el \\n'+\\\n 'tablero en este turno.\\n\\n• TERMINAR: Finaliza la partida.\\n'+\\\n '\\n• CAMBIAR FICHAS: Permite seleccionar las fichas.\\n'+\\\n '\\n• POSPONER: Guarda el estado del juego hasta el momento (fichas, puntos,\\n'+\\\n 'palabras jugadas, etc) para poder continuar la partida luego.\\n'+\\\n '\\n• PASAR TURNO: Permite cederle el turno a la máquina.'\n return texto", "def save_massage(self, text: str, sender_username: str):\n self.sql_lock.acquire()\n query: str = \"INSERT INTO messages VALUES(?, ?, NULL)\"\n if len(text) > 0:\n self.cursor.execute(query,(text, sender_username))\n self.connection.commit()\n self.sql_lock.release()\n return {\"saved\": True, \"type\": \"uploaded successfully\"}\n else:\n self.sql_lock.release()\n return {\"saved\": False, \"type\": \"could not upload an empty message!!\"}", "def test_im_chat_messages(self):\n pass", "def get_result(self):\n print('''message: {}\nopen key: ({}, {})\nencoded message: {}'''.format(self.msg, self.n, self.e, self.__encoded_msg))", "def test_from_and_to_text(self):\n rkeyring = dns.tsigkeyring.from_text(text_keyring)\n tkeyring = dns.tsigkeyring.to_text(rkeyring)\n 
self.assertEqual(tkeyring, text_keyring)", "def d_sendText(self, messageText):\n #print \"send message %s\" % messageText\n self.sendUpdate(\"sendText\", [messageText])", "def get_message():\n msg = str(input('-- Input the message: '))\n msg = msg.split()\n x = []\n for i in msg:\n if i == 'ESC':\n x.append('10100011')\n x.append('10100011')\n elif i == 'FLAG':\n x.append('10100011')\n x.append('01111110')\n else:\n try:\n x.append(format(ord(i),'08b'))\n except(TypeError):\n print(Exception)\n print(\"Entered non char value in message\")\n return None\n return x", "def handle_text_messages(self, update, context):\n\n # Split user input into single words\n words = set(update.message.text.lower().split())\n logging.debug(f'Received message: {update.message.text}')\n\n # For debugging: Log users that received something from bot\n chat_user_client = update.message.from_user.username\n if chat_user_client == None:\n chat_user_client = update.message.chat_id\n\n\n # Possibility: received command from menu_trigger\n for Trigger in self.menu_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.show_menu(update, context)\n logging.info(f'{chat_user_client} checked out the menu!')\n\n return\n\n\n # Possibility: received command from loan_stats_trigger\n for Trigger in self.loan_stats_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n #self.send_textfile('under_construction.txt', update, context)\n self.show_loan_stats(update, context)\n self.send_signature(update, context)\n logging.info(f'{chat_user_client} got loan stats!')\n\n return\n\n # Possibility: received command from il_trigger\n for Trigger in self.il_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.show_il(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get IL info!')\n\n return\n\n # Possibility: received command from assets_trigger\n for Trigger in self.assets_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.self.show_assets(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get asset info!')\n\n return", "def keyboard(user_id):\n\tkb = types.ReplyKeyboardMarkup(resize_keyboard = True)\n\tprocess = \"Товары в призводстве\"\n\trequest = \"Созданные товары\"\n\tadd_material = \"Добавить материал\"\n\ttake_order = \"Создать запрос\"\n\ttxt = \"Внизу у Вас появится меню\"\n\t#keyboard for adm\n\tif user_id == config.adm_id_1 or config.adm_id_2:\n\t\tkb.add(process)\n\t\tkb.add(request)\n\t\tkb.add(add_material)\n\t\tkb.add(take_order)\n\t\treturn kb, txt \n\t#keyboard for store\n\telif user_id == config.store_id:\n\t\tkb.add(process)\n\t\tkb.add(request)\n\t\tkb.add(take_order)\n\t\treturn kb, txt\n\t#keyboard for factory\n\telif user_id == config.factory_id:\n\t\tkb.add(process)\n\t\treturn kb, txt", "def send_to_outputfield(self, message):\n\n try:\n # First strip characters outside of range\n # that cannot be handled by tkinter output field\n char_list = ''\n for x in range(len(message)):\n if ord(message[x]) in range(65536):\n char_list += message[x]\n message = char_list\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")\n try:\n self.output.insert(END, message + \"\\n\")\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def decode_message(self, key):\n\n 
decoded_message = ''\n for char in self.message:\n if char.isalpha():\n decoded_char = self.convert_char(char, key)\n decoded_message = decoded_message + decoded_char\n else:\n decoded_message = decoded_message + char\n return decoded_message", "def message_handler(message):\n location = database.get_location(message.from_user.id)\n if not location:\n return {\"text\": \"Для поиска лекарств отправь своё местоположение\"}\n\n return get_drugs_message(find_drugs(message.text.encode('utf-8')))", "def on_chat(self, event, text):\n return None", "def text_message(self, update, context):\n # check mode\n if self.adding_meals:\n # text from the message is retrieved\n typed_meal = update.message.text\n # we get the instance from the meal list. It might be None\n meal = self.meal_list.get(typed_meal)\n try:\n # might produce an AttributeError if ingridients is None\n # every ingridient in the meal is checked\n for ingridient in meal.ingridients:\n # if it's already in self.list the quantity increases\n if ingridient.name in self.list.keys():\n self.list[ingridient.name][1] += 1\n else:\n # the instance is added to the list\n self.list[ingridient.name] = [ingridient, 1]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except AttributeError:\n to_write = MESSAGES[\"meal_error\"]\n # message is send\n self.send_message(update, context, to_write)\n # check mode\n elif self.adding_ingridients:\n # text from the message is retrieved\n typed_ingridient = update.message.text\n # we get the instance from the ingridients list. It might be None\n ingridient = self.ingridients.get(typed_ingridient)\n try:\n # might produce an AttributeError if ingridients is None\n # if it's already in self.list the quantity increases\n if ingridient.name in self.list.keys():\n self.list[ingridient.name][1] += 1\n else:\n # the instance is added to the list\n self.list[ingridient.name] = [ingridient, 1]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except AttributeError:\n to_write = MESSAGES[\"add_ingridient_error\"]\n # message is send\n self.send_message(update, context, to_write)\n # check mode\n elif self.removing_ingridients:\n # text from the message is retrieved\n typed_ingridient = update.message.text\n try:\n # might produce a KeyError if typed_meal is not in self.list\n # decreases amounot of the ingridient\n self.list[typed_ingridient][1] -= 1\n # remove igridient from list when the quantity is 0\n if self.list[typed_ingridient][1] == 0:\n del self.list[typed_ingridient]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except KeyError:\n to_write = MESSAGES[\"remove_ingridient_error\"]\n # message is send\n self.keyboard = \"remove_ingridients\"\n self.send_message(update, context, to_write)", "def __process_text(self, cipher_key, message):\n output = []\n # process text\n for letter in message:\n if letter not in cipher_key:\n output.append(letter)\n else:\n output.append(cipher_key[letter])\n\n return ''.join(output)", "def read_message(answer) :\n l = []\n for letter in answer:\n if letter==255:\n break\n l.append(chr(letter))\n\n message=\"\".join(l)\n\n return message", "def __send(self) -> None:\n # region Docstring\n # endregion\n\n if len(self.entryline.get_text().strip()) > 0:\n self.udp.transmission(\n \"CHA\", \"01\", self.username, 
self.entryline.get_text().strip()\n )\n self.__addmsg(f\"<b>(YOU): </b><br>{self.entryline.get_text().strip()}<br>\")\n self.entryline.set_text(\"\")", "def obtain_text():\n pass", "def send_text(self, data: str) -> None:", "def insertall_message(self, text):\n return self.insertall([{'logging': text}])", "async def badman(self, ctx):\n await ctx.message.edit(content=\"̿̿ ̿̿ ̿̿ ̿'̿'\\̵͇̿̿\\з= ( ▀ ͜͞ʖ▀) =ε/̵͇̿̿/’̿’̿ ̿ ̿̿ ̿̿ ̿̿\")", "def text_reply(msg):\n if msg['Type'] != TEXT:\n # sanitize the text field so that we can assume it always contains string.\n # and this is also to avoid infinite loop during serialization in the persist function\n msg['Text'] = msg['Type']\n\n to_user_id_name = msg['ToUserName']\n from_user_id_name = msg['FromUserName']\n\n if is_my_outgoing_msg(msg):\n handle_outgoing_msg(msg, to_user_id_name)\n else: # this is an incoming message from my friend\n handle_incoming_msg(msg, from_user_id_name)", "def on_text_message(self, update, context):\n chat_id = update.effective_chat.id\n log.info(\"Msg from:%s `%s`\", chat_id, update.effective_message.text)\n\n if context.user_data[\"state\"] == c.State.EXPECTING_AMOUNT:\n log.info(\"Vol:%s spent %s MDL on this request\", chat_id, update.effective_message.text)\n # TODO validate the message and make sure it is a number, discuss whether this is necessary at all\n # TODO send this to the server, we need to define an API for that\n request_id = context.user_data[\"current_request\"]\n\n # Write this amount to the persistent state, so we can rely on it later\n context.bot_data[request_id][\"amount\"] = update.effective_message.text\n\n # Then we have to ask them to send a receipt.\n self.send_message_ex(update.message.chat_id, c.MSG_FEEDBACK_RECEIPT)\n context.user_data[\"state\"] = c.State.EXPECTING_RECEIPT\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_FURTHER_COMMENTS:\n log.info(\"Vol:%s has further comments: %s\", chat_id, update.effective_message.text)\n request_id = context.user_data[\"current_request\"]\n context.bot_data[request_id][\"further_comments\"] = update.effective_message.text\n self.finalize_request(update, context, request_id)\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_PROFILE_DETAILS:\n self.build_profile(update, context, raw_text=update.effective_message.text)\n return\n\n # if we got this far it means it is some sort of an arbitrary message that we weren't yet expecting\n log.warning(\"unexpected message ..........\")", "def sending_keys(conn, cur):\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM mythic_key\")\r\n content = cur.fetchall()\r\n cur.close()\r\n\r\n data = {\r\n \"embeds\": [\r\n {\r\n \"title\": \"Global keys\",\r\n \"fields\": [\r\n {\r\n \"name\": 'Personnages',\r\n \"value\": '\\n'.join(\r\n i[0] + \" - \" + i[1] for i in content\r\n ),\r\n \"inline\": 'true',\r\n },\r\n {\r\n \"name\": 'Donjon',\r\n \"value\": '\\n'.join(i[2] for i in content),\r\n \"inline\": 'true',\r\n },\r\n {\r\n \"name\": 'Level',\r\n \"value\": '\\n'.join(f\"+{i[3]}\" for i in content),\r\n \"inline\": 'true',\r\n },\r\n ],\r\n \"color\": 7741329,\r\n \"timestamp\": str(datetime.datetime.now()),\r\n }\r\n ],\r\n \"username\": \"Esclave Purotin\",\r\n }\r\n\r\n url = 'ENTER YOUR WEBHOOK ID HERE'\r\n requests.post(url, json=data, headers={\"Content-Type\": \"application/json\"})", "def carrega_mensagem(self, codigo):\r\n if not codigo:\r\n raise Exception(\"709 - Parametros insuficientes!\")\r\n \r\n ## Futuramente salvar as mensagens em um arquivo.\r\n mensagem = 
{700:\"Duplicidade de NFCe com diferrenca\" + \r\n \" na chave de acesso.\",\r\n 701:\"Danfe Autorizado mas houve erro na impressao!\" + \\\r\n \" Foi cancelado!\",\r\n 702:\"Impressora esta OFF-LINE!\",\r\n 703:\"Impressora sem papel!\",\r\n 704:\"Impressora nao esta operacional!\",\r\n 705:\"XML de envio fora do padrao!\",\r\n 706:\"Rejeicao: Duplicidade de Evento!\",\r\n 707:\"Falha ao dar permissao no arquivo!\",\r\n 708:\"Tag nao encontrada!\",\r\n 709:\"Parametros insuficientes!\",\r\n 710:\"Codigo nao catalogado!\",\r\n 711:\"Parametro nao encontrado!\",\r\n 712:\"Sem comunicacao com Web Service!\",\r\n 713:\"Rejeicao: NFC-e autorizada a mais de 24 horas\",\r\n 714:\"Nao foi possivel dar permissao na porta de impressao!\",\r\n 715:\"Falha ao configurar velocidade da porta de impressao!\",\r\n\t\t 716:\"ATENCAO: Danfe Autorizado mas houve um erro\" +\r\n \" na impressao!\",\r\n\t\t 717:\"Impressora esta OFF-Line e nao foi possivel dar \" +\r\n\t\t \"permissao na porta\",\r\n\t\t 718:\"Erro ao tentar invocar servico web!\",\r\n\t\t 719:\"Documento NFCe anterior pendente!\",\r\n\t\t 720:\"Falha na estrutura do evento enviado!\",\r\n\t\t 721:\"NFCe nao em fase de encerramento ou \" + \r\n\t\t \"Erro ao tentar invocar o servico!\",\r\n 722:\"Documento possui uma serie diferente da utilizada no PDV\",\r\n 723:\"Chave de comunicao invalida\",\r\n 798:\"Rejeicao: NFC-e com Data-Hora de emissao atrasada.\" + \\\r\n \"|ou Duplicidade na chave de acesso.\",\r\n 799:\"ERRO nao catalogado!\",\r\n }\r\n if codigo not in mensagem.keys():\r\n raise Exception(\"710 - Codigo nao catalogado!\")\r\n return {\"codigo\": codigo, \"mensagem\": mensagem[codigo]}", "def get_message(self):\n\n if self.gotten: return\n self.get_recipients()\n self.get_text()\n self.get_price()\n self.get_files()\n self.set_text()\n if Settings.get_performer_category() or self.hasPerformers:\n self.get_performers()\n else:\n self.performers = \"unset\"\n self.gotten = True", "def tijd(update, context):\r\n msgContent = str(update['message']['text']).split(' ')\r\n OVNummer = int(msgContent[1]) # OV nummer uit bericht lezen\r\n context.bot.send_message(chat_id=update.effective_chat.id, text=fietsStalTijd(OVNummer))", "def text(self) -> str:", "def create_preview(message):", "def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)", "def bot_ce(mess, nick, botCmd):\n path = \"/usr/bin/\"\n \"\"\"Look up word in dict via sdcv\"\"\"\n if (len(botCmd) == 1):\n message = u\"/me says:“Please type in format: ‘!d word’”\"\n else:\n word = botCmd[1]\n cmd = path + \"sdcv --utf8-output --utf8-input -n '\" + word +\"'\"\n result = os.popen(cmd.encode(\"UTF-8\"), \"r\").read()\n if result:\n if result.count('-->') > 1:\n # firstArrowPosition = result.find('-->')\n # secondArrowPosition = result.find('-->', firstArrowPosition + 3)\n # result = result[:secondArrowPosition]\n message = '/me says:\\n' + result\n else:\n message = self.optFail(u\"Word not found.\")\n return message", "def display_message():", "def handleMessage(msg):", "def on_commitMessageEdit_textChanged(self):\n self.__updateOK()", "def format(self, message):", "def prijsTg(update, context):\r\n msgContent = str(update['message']['text']).split(' ')\r\n OVNummer = int(msgContent[1]) # OV nummer uit bericht lezen\r\n 
context.bot.send_message(chat_id=update.effective_chat.id, text=kluisInfoTg(OVNummer))", "def encode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to encode: \", end=\"\")\n message = input()\n encoded = LEFT_TO_RIGHT_MARK\n for message_char in message:\n code = '{0}{1}'.format('0' * padding, int(str(to_base(\n ord(message_char), len(zero_space_symbols)))))\n code = code[len(code) - padding:]\n for code_char in code:\n index = int(code_char)\n encoded = encoded + zero_space_symbols[index]\n\n encoded += RIGHT_TO_LEFT_MARK\n\n pyperclip.copy(encoded)\n print(f\"{GREEN}[+]{RESET} Encoded message copied to clipboard. {GREEN}[+]{RESET}\")", "def _get_plain_message (self) :\n return self._message", "def save_info(self):\n if len(self.password_text.text()) < 8:\n message = Message(self.language[\"inv_pass\"], self.language[\"pass_not_long\"])\n warning_message = message.create_iw_message(self.language[\"ok\"], \"warning\")\n warning_message.exec()\n else:\n data_acces = DbMethods()\n response = data_acces.change_user_information(self.username,\n Hash.encrypt(self.password_text.text()))\n\n if response == True:\n message = Message(\n self.language[\"success\"], self.language[\"act_info\"])\n information_message = message.create_iw_message(\n self.language[\"ok\"], \"information\")\n information_message.exec()\n else:\n message = Message(self.language[\"error\"], self.language[\"inf_error\"])\n warning_message = message.create_iw_message(self.language[\"ok\"], \"warning\")\n warning_message.exec()\n self.close()", "def message():\n if request.method == 'POST':\n db.log_msg(request.form['text'], request.cookies.get('username'))\n return db.get_all_messages()", "def received_information(update: Update, context: CallbackContext) -> int:\r\n user_data = context.user_data\r\n text = update.message.text\r\n category = user_data['choice']\r\n user_data[category] = text\r\n del user_data['choice']\r\n\r\n update.message.reply_text(\r\n \"Genial, tu pedido está avanzando de esta manera:\"\r\n f\"{facts_to_str(user_data)}Puedes agregar algún comentario o cambio en tu orden en Comentarios...\",\r\n reply_markup=markup,\r\n )\r\n\r\n return CHOOSING", "def get_encoded_msg():\n print(\"Enter text you would like to decode:\\n\")\n e_msg = input(\">\")\n return e_msg", "def _encode_text(self):\n\n print(f\"Hex encode; received message is {self.message}\")\n return self.message.encode(\"utf-8\").hex()", "def get_text(self):\n if type(self.message) == WebElement:\n try:\n return self.message.find_element(By.XPATH,\n \".//span[@class='_3-8er selectable-text copyable-text']/span\").text\n except NoSuchElementException:\n return \"message_deleted\"", "def encode_message(self, **kwargs):\r\n\r\n\t\tif kwargs[\"action\"] == \"NO\":\r\n\r\n\t\t\tself.send_message(\"|%s|%s|\" % (kwargs[\"action\"], kwargs[\"selected_name\"]))\r\n\r\n\t\telif kwargs[\"action\"] in [\"ME\",\"UR\"]:\r\n\r\n\t\t\tself.send_message(\"|%s|%s|\" % (kwargs[\"action\"], kwargs[\"message\"]))\r\n\r\n\t\telif kwargs[\"action\"] == \"LA\":\r\n\r\n\t\t\tself.send_message(\"|LA|\")\r\n\r\n\t\telif message_split[0] == \"CH\":\r\n\r\n\t\t\tpass\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Impossible d'envoyer un message\",\r\n\t\t\t\t\t\t\t\t\t \"Le message suivant n'a pas pu être envoyé car mal encodé : {}\".format(kwargs),\r\n\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\tprint(\"Error during encoding with arguments : %s\" % kwargs)", "def encrypt_message(self):\r\n\t\t#Will not let user input useless messages that cannot 
be encrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to encrypt. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\tself.setup_key_encrypt()\r\n\t\tmy_code = Encryptor(self.message, self.key)\r\n\t\tprint(my_code.transfer_encrypt()+ \"|\")" ]
[ "0.57340914", "0.56032443", "0.5596878", "0.55474406", "0.5542437", "0.5542437", "0.5542437", "0.5542437", "0.5542437", "0.55125624", "0.55125624", "0.55125624", "0.5512051", "0.5425027", "0.54236233", "0.54012054", "0.5378505", "0.5372138", "0.5365111", "0.5340748", "0.53144115", "0.53095824", "0.5297867", "0.5293144", "0.5271349", "0.525883", "0.5175932", "0.5171966", "0.5171274", "0.51685065", "0.51477647", "0.51340765", "0.51196724", "0.51032835", "0.510306", "0.51027477", "0.5085875", "0.50808614", "0.507828", "0.5077054", "0.5069599", "0.5060163", "0.5050311", "0.50248027", "0.5022371", "0.4982007", "0.4980446", "0.49698365", "0.49673232", "0.49636444", "0.4962885", "0.49530113", "0.49502197", "0.4949176", "0.49440283", "0.49426013", "0.49407977", "0.49407142", "0.49359384", "0.49309763", "0.49254525", "0.49192247", "0.49171942", "0.4909064", "0.49090594", "0.48987904", "0.48937514", "0.48902732", "0.48837286", "0.48785523", "0.48775804", "0.4869776", "0.48667377", "0.48665947", "0.48637024", "0.48620597", "0.48492354", "0.48457813", "0.48414928", "0.48361582", "0.48332542", "0.48253623", "0.4825235", "0.48120475", "0.48094893", "0.48089314", "0.48071855", "0.47973058", "0.47942904", "0.4793093", "0.47916952", "0.47883663", "0.47872135", "0.4783946", "0.4781076", "0.47645378", "0.47626847", "0.47612098", "0.47610077", "0.47607535" ]
0.6025704
0
Test hiding the library.
def test_privatize_library(self):
    g = Game()
    g.add_player(uuid4(), 'p1')
    g.add_player(uuid4(), 'p2')
    gs = g

    gs.library.set_content(cm.get_cards(
        ['Circus', 'Circus', 'Circus', 'Circus Maximus', 'Circus',
         'Circus', 'Ludus Magna', 'Ludus Magna', 'Statue', 'Coliseum',
        ]))

    gs_private = g.privatized_game_state_copy('p1')

    self.assertFalse(gs_private.library.contains('Circus'))
    self.assertEqual(gs_private.library,
            Zone([Card(-1)]*len(gs_private.library), name='library'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_module(self):\n pass", "def __test__():\n#-------------------------------------------------------------------------------\n import pylib.tester as tester\n return 0", "def importlib_only(fxn):\n return unittest.skipIf(using___import__, \"importlib-specific test\")(fxn)", "def test_plot_ay_imported():\n assert \"plot_ay\" in sys.modules", "def test_package(self):\n pass", "def test_example(decorated_example):\n import visual_coding_2p_analysis", "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "def test_imports():\n assert False", "def test_ufedmm_imported():\n assert \"ufedmm\" in sys.modules", "def test_parrot_imported():\n assert \"parrot\" in sys.modules", "def test_stub(self):\n pass", "def mockup(cls):\n pass", "def test_deprecated_cant_find_module() -> None:\n with patch(\"inspect.getmodule\", return_value=None):\n # This used to raise.\n cv.deprecated(\n \"mars\",\n replacement_key=\"jupiter\",\n default=False,\n )\n\n with patch(\"inspect.getmodule\", return_value=None):\n # This used to raise.\n cv.removed(\n \"mars\",\n default=False,\n )", "def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)", "def _test():\n import doctest", "def test_should_implement(self):\n pass", "def AllInternalsVisible(self) -> bool:", "def test_qm_project_python_testing_imported():\n assert \"qm_project_python_testing\" in sys.modules", "def test(self):\n raise NotImplementedError", "def unitary_test():", "def dl():\n raise NotImplementedError()", "def test_defining_only_and_defer_fails(self):", "def test_unloadable(self):\n pass", "def test_absent_imports():\n module, HABEMUS_MODULE = optional_import(\"not_real_module\")\n\n assert not HABEMUS_MODULE\n assert module.__name__ == \"not_real_module\"\n with pytest.raises(ModuleNotFoundError):\n _ = module.layers", "def test_class_internal(self):\n fwa = FakeWikiArchivo(\n 'abcd <a href=\"/wiki/foobar\" class=\"internal\">FooBar</a> dcba'\n )\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [])", "def test_library(self):\n self.assertEqual(LibraryConfig.name, \"library\")", "def test_version():\n assert(hasattr(tekel, '__version__'))", "def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def unavailable_importer(**kwargs):\n return LazyImportTester(\"_qiskit_this_module_does_not_exist_\", **kwargs)", "def hide(obj):\n obj._spec__is_private = True\n return obj", "def _test():\n import doctest\n doctest.testmod()", "def _test():\n import doctest\n doctest.testmod()", "def test_rlmm_imported():\n assert \"rlmm\" in sys.modules", "def test_vendored_libjuju(self):\n for name in sys.modules:\n if name.startswith(\"juju\"):\n module = sys.modules[name]\n if getattr(module, \"__file__\"):\n print(getattr(module, \"__file__\"))\n assert re.search('n2vc', module.__file__, re.IGNORECASE)\n\n # assert module.__file__.find(\"N2VC\")\n # assert False\n return", "def testable(self):\n return False", "def test_redirection_weldx_widgets_not_found():\n orig_import = __import__ # Store original __import__\n\n def import_mock(name, *args, **kwargs):\n if \"weldx_widgets\" in name:\n raise ModuleNotFoundError(\"weldx_widgets not found\")\n if \"matplotlib\" in name:\n raise ModuleNotFoundError(\"matplotlib not found\")\n return orig_import(name, *args, **kwargs)\n\n pattern = \".*weldx_widget.*unavailable\"\n\n with 
patch(\"builtins.__import__\", side_effect=import_mock):\n with pytest.warns(match=pattern):\n import weldx.visualization as vs\n\n # ensure that using declared features emits the warning again.\n for name in vs.__all__:\n with pytest.warns(match=pattern):\n obj = getattr(vs, name)\n obj()", "def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]", "def test_analyzer():\n import analyzer\n\n analyzer # Fake usage.", "def test_init_client(self):\n # TODO: dynamically importing dependancies from the file tested\n self.assertIn(\n \"describe_trusted_advisor_check_result\", dir(self.subclass.client)\n )", "def test_imports():\n import sys\n import src\n assert 'sklearn.feature_extraction' not in sys.modules.keys()", "def test_import():\n import chrisbrake\n assert chrisbrake", "def test_module():\n query = {\n 'operator': 'all',\n 'children': [\n {\n 'field': 'sample.malware',\n 'operator': 'is',\n 'value': 1\n }\n ]\n }\n\n do_search('samples', query=query, scope='Public', err_operation='Test module failed')\n return", "def test_py_volume(self):\n self._test_py_compile('volume')", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_WW95_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import WW95 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.WW95\", test]", "def test_molssi_project_imported():\n assert \"molssi_project\" in sys.modules", "def test(self):\n self.skipped_test('doctest module has no DocTestSuite class')", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def test_submodule(submodule):\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n # Ensure submodule is defined within the package we are inspecting (and not 'base')\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module", "def test_ensureWhenNotImportedDontPrevent(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\")\n self.assertEqual(modules, {})", "def tearDownModule():\n pass\n # logPoint('module %s' % __name__)", "def test_deprecations():\n with pytest.deprecated_call():\n from aesara.tensor.subtensor_opt import get_advsubtensor_axis # noqa: F401 F811", "def test_valid_python():\n from decisionengine.framework.util import reaper # noqa: F401\n\n pass", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_required_methods(self):", "def tearDown(self):\n builtins.__import__ = self.original_imports", "def test_all_no_class(self):", "def test_all_no_class(self):", "def example_function_in_example_module():\n pass", "def test_import():\n assert hasattr(waves, 'wave_number')", "def testmodule():\n import doctest\n import sys\n thismodule = sys.modules[__name__]\n return doctest.testmod(m=thismodule)", "def testmodule():\n import doctest\n import sys\n thismodule = sys.modules[__name__]\n return doctest.testmod(m=thismodule)", "def test_patch_none():", "def test_subclass():\n assert issubclass(BlockOnSpring, PhysicsModule)", "def test_check_module(self) -> None:\n check_module(\"os\")", "def test_import_kedro_viz_with_no_official_support_emits_warning(mocker):\n mocker.patch(\"kedro_viz.sys.version_info\", (3, 12))\n\n # We use the parent class to avoid issues with `exec_module`\n with pytest.warns(UserWarning) as record:\n kedro_viz.__loader__.exec_module(kedro_viz)\n\n assert len(record) == 1\n assert (\n \"\"\"Please be advised that Kedro Viz is not yet fully\n compatible with the Python version you are currently using.\"\"\"\n in record[0].message.args[0]\n )", "def test_instantiates_badgr_lite_class(self):\n badgr = self.get_badgr_setup()\n self.assertIsInstance(badgr, BadgrLite)", "def test(self):\n pass", "def __integration_doctest():\n pass", "def test_mmelemental_imported():\n import sys\n\n assert \"mmelemental\" in sys.modules", "def _test():\n import doctest\n doctest.testmod(verbose=1)", "def is_nuke():\n try:\n import _nuke\n return True\n except ImportError:\n return False", "def testable(self):\n\t\treturn True", "def test_find_module_py33():\n assert find_module_py33('_io') == (None, '_io', False)", "def test():\n import doctest\n from . import locate\n return doctest.testmod(locate)", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_import():\n import pyapp", "def test_untar(self):", "def test_not_public(self):\n self.change_status(self.version_1_2_2, amo.STATUS_NULL)\n self.addon.update(status=amo.STATUS_NULL)\n version, file = self.get('1.2.1', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def testInitialization(self):\n # pytype: disable=attribute-error\n self.assertEqual(self.grr_hunt_downloader.hunt_id, 'H:12345')\n # pytype: enable=attribute-error", "def test_square():\n\n from my_pkg.trial import square\n\n assert 4 == square(2)", "def test_failure():\n with pytest.raises(ModuleNotFoundError):\n import torch # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import tensorflow # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import horovod # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n from ray import tune # noqa: F401", "def test_direct_import():\n from cython_oracle.oracle import answer_to_all_questions\n\n assert answer_to_all_questions() == 42", "def test_compiled_import_none(monkeypatch, Script):\n monkeypatch.setattr(compiled, 'load_module', lambda *args, **kwargs: None)\n assert not Script('import sys').goto_definitions()", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def test_import_allows_multiple_modules_failure(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\", \"_qiskit_module_does_not_exist_\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertFalse(feature)\n check.assert_called_once()", "def test_is_not_hidden(self) -> None:\n path = \"home\"\n result = is_hidden(path)\n self.assertFalse(result)", "def testFindsBuiltins(self):\r\n self.assertEqual('sys', modulefinder.get_module_filename('sys'))\r\n self.assertEqual('time', modulefinder.get_module_filename('time'))", "def test_module_attribute() -> None:\n assert hasattr(lmp.tknzr._bpe, 'BPETknzr')\n assert inspect.isclass(lmp.tknzr._bpe.BPETknzr)\n assert not inspect.isabstract(lmp.tknzr._bpe.BPETknzr)\n assert issubclass(lmp.tknzr._bpe.BPETknzr, BaseTknzr)\n\n assert hasattr(lmp.tknzr._bpe, 'EOW_TK')\n assert isinstance(lmp.tknzr._bpe.EOW_TK, str)\n assert lmp.tknzr._bpe.EOW_TK == '<eow>'\n\n assert hasattr(lmp.tknzr._bpe, 'SPLIT_PTTN')\n assert isinstance(lmp.tknzr._bpe.SPLIT_PTTN, re.Pattern)\n assert lmp.tknzr._bpe.SPLIT_PTTN.pattern == r'(<bos>|<eos>|<pad>|<unk>|\\s+)'", "def test_plugin_with_no_plugin_class(conf):\n # For fun, we pass in a system library\n installed_apps_before = conf.config[\"INSTALLED_APPS\"][:]\n cli.plugin(\"os.path\")\n assert installed_apps_before == conf.config[\"INSTALLED_APPS\"]", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_doesnt_implement_process(self):\r\n some_io = StringIO.StringIO()\r\n imp = Importer(some_io)\r\n self.assertRaises(NotImplementedError, imp.process)" ]
[ "0.6566218", "0.6470722", "0.63644457", "0.6197142", "0.6101666", "0.6054569", "0.59973603", "0.5936304", "0.5921999", "0.5883885", "0.5871062", "0.5861786", "0.5840665", "0.5811318", "0.57991433", "0.57575244", "0.5749411", "0.5728871", "0.5727159", "0.56983817", "0.5689791", "0.56868184", "0.56825", "0.56740373", "0.5656663", "0.5655408", "0.56342113", "0.56316704", "0.56263113", "0.56263113", "0.56263113", "0.5620678", "0.5618361", "0.5615398", "0.5615398", "0.5606529", "0.5600223", "0.5599129", "0.55953044", "0.5589277", "0.5563883", "0.5554263", "0.55513024", "0.55355626", "0.55328083", "0.5528729", "0.5518308", "0.5518308", "0.5518308", "0.5518308", "0.5518308", "0.5511149", "0.5505926", "0.55001724", "0.5492887", "0.5489739", "0.54878235", "0.54780364", "0.5460626", "0.54599285", "0.5441113", "0.54395163", "0.54291135", "0.5424271", "0.5424271", "0.54236084", "0.5420842", "0.5413275", "0.5413275", "0.54132056", "0.5413028", "0.54062337", "0.5402329", "0.53994006", "0.539839", "0.5395763", "0.53931004", "0.5381164", "0.53802633", "0.5378874", "0.5371958", "0.536662", "0.5365144", "0.5363499", "0.5363153", "0.5360565", "0.5353558", "0.5348922", "0.53477085", "0.5338624", "0.5330652", "0.53244996", "0.53244996", "0.53194827", "0.53138226", "0.529975", "0.52973294", "0.52960145", "0.52840066", "0.5275637" ]
0.6069717
5
Test hiding opponents' hands.
def test_privatize_hands(self):
    g = Game()
    g.add_player(uuid4(), 'p0')
    g.add_player(uuid4(), 'p1')
    gs = g
    p0, p1 = gs.players

    latrine, insula, jack, road = cm.get_cards(['Latrine', 'Insula', 'Jack', 'Road'])
    p0.hand.set_content([latrine, insula])
    p1.hand.set_content([jack, road])

    gs_private = g.privatized_game_state_copy('p0')
    p0, p1 = gs_private.players

    self.assertIn(jack, p1.hand)
    self.assertIn(Card(-1), p1.hand)
    self.assertNotIn(road, p1.hand)

    self.assertIn(latrine, p0.hand)
    self.assertIn(insula, p0.hand)

    self.assertEqual(len(p0.hand), 2)
    self.assertEqual(len(p1.hand), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_play_nobane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\")\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertIn(self.g[self.g._bane].cost, (2, 3))\n self.assertEqual(self.attacker.piles[Piles.HAND].size(), 5 + 2 - 2)\n self.assertIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! 
Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()", "def test_play_bane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\", self.g._bane)\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertNotIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def test_switch_hidden1(self):\n self.test_object.switch_hidden()\n self.assertFalse(self.test_object.get_hidden())", "def reset_hands(self):\r\n\r\n self.player_rock = False\r\n self.player_paper = False\r\n self.player_scissors = False\r\n self.opp_rock = False\r\n self.opp_paper = False\r\n self.opp_scissors = False", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def hide_card(self):\n try:\n self.hidden_card_value = self.hand[1]\n self.hand[1] = Card()\n except IndexError:\n print('The dealer does not have enough cards!')", "def test_for_non_splittable_hand(self):\n hand = self._hand\n cards = [BjCard('clubs', '7'), BjCard('diamonds', '4')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, False)", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def test_switch_hidden2(self):\n self.test_object.switch_hidden()\n self.test_object.switch_hidden()\n self.assertTrue(self.test_object.get_hidden())", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! 
You win!\")", "def deal_hands( self ):\n \tself.shuffle()\n \thand_one = []\n \thand_two = []\n\n \tfor counter in range(5):\n \t\thand_one.append(self.deal())\n \t\thand_two.append(self.deal())\n\n \treturn hand_one, hand_two", "def show_card(self):\n return self.hands.show(0)", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def is_game_win(self):\n return not self.deck and not self.hand", "def still_in_hand(self):\n return len(self.hand.cards)!=0", "def test_play(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.favors.get(), 1)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1 + 1)", "def hide_people(self):\n for panel in self.health_panels.values():\n panel.hide_person()\n panel.set_drinking_state(healthpanel.S_OFF)", "def deal(self):\n hands = sample(self.deck, 13) #random sample so no need to shuffle\n hand1, hand2, flip = hands[:6], hands[6:-1], hands[-1]\n return hand1, hand2, flip", "def test_tie_when_both_hands_are_straightflush():\n from poker_rankings import PokerHand\n heroes_hand = PokerHand(\"5H 4H 3H 2H AH\")\n villains_hand = PokerHand(\"5H 4H 3H 2H AH\")\n heroes_hand.compare_with(villains_hand) == 'Tie'", "def handDecision(handIn):", "def testDarkenedRoom(self):\n darkThings = list(idea.find(self.observer.idea, iimaginary.IVisible))\n self.assertDarkRoom(darkThings[0])\n self.assertEquals(len(darkThings), 1)", "def deal(self):\n\n if self.dealer: # Has cards in hand\n self.dealer.reset()\n\n if self.player: # Has cards in hand\n self.player.reset()\n\n dealer_first = self.deck.draw()\n dealer_second = self.deck.draw()\n dealer_second.flip()\n self.dealer.take_card(dealer_first)\n self.dealer.take_card(dealer_second)\n\n player_first = self.deck.draw()\n player_second = self.deck.draw()\n player_first.flip()\n player_second.flip()\n self.player.take_card(player_first)\n self.player.take_card(player_second)\n\n if self.verbose:\n print('Player bets:', self.player_bet)\n for player in (self.player, self.dealer):\n print(player, 'dealt:')\n for card in player:\n if card.face():\n print(' '*3, str(card)+':', 'face up')\n else:\n print(' '*3, str(card)+':', 'face down')", "def displayHands(p_hand, d_hand):\n os.system('clear') # Call to OS clear the screen to clean up output\n print(\"\\nPlayer hand: \", p_hand.showHand())\n print(\"Player score: \", p_hand.handSum())\n\n print(\"\\nDealer hand: \", d_hand.showHand())\n print(\"Dealer score: \", d_hand.handSum())", "def showdown(game, episode):\n # return winner\n print(\"Player 1:\", game.player1.pocket.cards)\n print(\"Player 2:\", game.player2.pocket.cards)\n print(\"Community Cards:\", game.community_cards.cards)\n\n if game.player1.folded:\n game.winner = \"Player2\"\n game.player2.funds += game.table_pot\n elif game.player2.folded:\n game.winner = \"Player1\"\n game.player1.funds += game.table_pot\n else:\n winner = compare_hands(game.player1.pocket.cards, game.player2.pocket.cards, game.community_cards.cards)\n if winner == \"Player1\":\n game.winner = \"Player1\"\n game.player1.funds += game.table_pot\n elif winner == \"Player2\":\n game.winner = \"Player2\"\n game.player2.funds += game.table_pot\n else:\n game.winner = \"Tie\"\n 
game.player1.funds += game.table_pot/2\n game.player2.funds += game.table_pot/2\n\n mc_control_epsilon_greedy(episode, game, game.player1)\n mc_control_epsilon_greedy(episode, game, game.player2)\n print(\"Winner:\", game.winner)\n print(\"Player 1:\", game.player1.funds)\n print(\"Player2:\", game.player2.funds)\n print(\"Game Over\")\n print(\"New Round\")", "def test_is_hidden(self) -> None:\n path = \".ssh\"\n result = is_hidden(path)\n self.assertTrue(result)", "def test_hand_is_straightflush():\n from poker_rankings import PokerHand\n heroes_hand = PokerHand(\"5H 4H 3H 2H AH\")\n assert heroes_hand._is_flush == True\n assert heroes_hand._is_straight == True\n assert heroes_hand._hand_value == 9", "def deal_hands(self, num_bots):\n hand_size = self.parent.hand_size()\n hands = []\n full_deck = cards.full_deck()\n hand_cards = random.sample(full_deck, hand_size * num_bots)\n self.deck = [c for c in full_deck if not c in hand_cards]\n\n for i in range(0, num_bots):\n hand = []\n for j in range(0, hand_size):\n hand.append(hand_cards.pop())\n hands.append(hand)\n\n return hands", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def flush_udacity(hand):\n suits = [s for r,s in hand]\n return len(set(suits)) == 1", "def is_miss_deal(hand: list, mighty: Card) -> bool:\n point_card_count = 0\n for card in hand:\n if card.is_pointcard() and card != mighty:\n point_card_count += 1\n\n if point_card_count <= 1:\n return True\n else:\n return False", "def test_partial_deck_doesnt_have_ignored_cards(self):\n self.assertEqual(self.ignoredCardPresent, False)", "def play_for_dealer(self):\n while self.dealer.sum_cards() < 17:\n self.dealer.hit(self.deck)\n else:\n self.round_winner = True\n self.print_hands()\n self.determine_winner()", "def testInvisibleNegative(self):\n slhafile=\"./testFiles/slha/higgsinoStop.slha\"\n model = Model(BSMList,SMList)\n model.updateParticles(slhafile)\n topos = decomposer.decompose(model, .1*fb, False, True, 5.*GeV)\n tested = False\n for topo in topos:\n if str(topo)!=\"[1,1][1,1]\":\n continue\n for element in topo.elementList:\n if str(element)!=\"[[[t+],[t-]],[[q],[W+]]]\":\n continue\n tested = True\n trueMothers = [mother for mother in element.motherElements if not mother is element]\n self.assertEqual(len(trueMothers),0)\n self.assertTrue(tested)", "def test_not_is_the_same(self):\n self.assertFalse(show_players_sumheigh_is_input(140), \"-Mike Wilks Mike Wilks\")", "def test_nonVisibilityUnaffected(self):\n self.assertEquals(\n list(self.observer.idea.obtain(\n idea.Proximity(3, idea.ProviderOf(iimaginary.IThing)))),\n [self.observer, self.location, self.rock]\n )", "def opponent_hand(self):\r\n\r\n # 1 = rock, 2 = paper, 3 = scissors\r\n random_hand = random.randint(1, 3)\r\n\r\n # Slows down the pace of the game with pauses\r\n self.loading(0.5)\r\n\r\n if random_hand == 1:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: ROCK\"\r\n self.opp_rock = True\r\n print(\"Opponent chose Rock.\")\r\n\r\n elif random_hand == 2:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: PAPER\"\r\n self.opp_paper = True\r\n print(\"Opponent chose Paper.\")\r\n\r\n elif random_hand == 3:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: SCISSORS\"\r\n self.opp_scissors = True\r\n print(\"Opponent chose Scissors.\")\r\n\r\n # Clear the opponent hand entry box\r\n self.opp_hand_entry.delete(0, \"end\")\r\n\r\n # Insert the value of the randomized hand of 
the opponent\r\n self.opp_hand_entry.insert(0, opp_hand_value)", "def player_lose(self):\n global chips\n chips = chips - self.final_bet\n self.defeat = True\n placed_bet = False", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def test_is_not_hidden(self) -> None:\n path = \"home\"\n result = is_hidden(path)\n self.assertFalse(result)", "def test_is_not_missed():\n game = Game()\n game.word = 'word'\n assert game.is_missed('w') is False", "def play_hand(self):\n bots = self.living_bot_names()\n self.hands = self.deal_hands(len(bots))\n blinds_round = self.post_blinds(bots)\n self.betting_round(blinds_round)\n\n hand_phases = [self.deal_table_cards,\n self.betting_round,\n self.deal_table_cards,\n self.betting_round,\n self.showdown,\n self.blind_manager.finish_hand, ]\n\n for phase in hand_phases:\n pass", "def toggle_empty_hand(self):\n # well if hand is empty, unpack it from player\n if self.empty.get() is True:\n self.pack_forget()\n else:\n self.player.pack_hand() # bring it back\n # (depending on phase i.e. intelligence", "def test_play(self):\n self.plr.piles[Piles.DECK].set(\"Province\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.test_input = [\"keep\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.assertIn(\"Province\", self.plr.piles[Piles.DECK])\n self.assertNotIn(\"Province\", self.plr.piles[Piles.DISCARD])", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def hide_potentially_fake_levels(self, hide):\n return self._toggle_filter(Filters.PotentiallyFake, hide)", "def flush(hand):\n return len(set([suit for value, suit in hand])) == 1", "def hide_correctness(self):\n self.hiddens.add('correct')\n self.hiddens.add('score')", "def test_visible_blacklisted(self):\n\n self.feature_test.set_percentage(100)\n self.feature_test.add_to_blacklist(3)\n self.assertFalse(self.feature_test.is_visible(3))", "def hide_give_buttons(self):\n #\n # Don't show the give buttons\n for panel in self.health_panels.values():\n panel.hide_button()", "def testOddPlayersWithBye():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Alien 1\")\n registerPlayer(\"Alien 2\")\n registerPlayer(\"Alien 3\")\n standings = playerStandings()\n [id1, id2, id3] = [row[0] for row in standings]\n reportMatch(id1, id2)\n reportMatch(id3, id3, False, True)\n reportMatch(id2, id3)\n reportMatch(id1, id1, False, True)\n\n # id1 and id3 have one bye each. In this round, bye should be given to id2\n pairings = swissPairings()\n for pairing in pairings:\n if pairing[0]!=id2 and pairing[0]==pairing[2]:\n raise ValueError(\n \"swissPairings() should not award bye to a player who already\"\n \"has a bye.\"\n )\n if pairing[0]==id2 and pairing[2]!=id2:\n raise ValueError(\n \"swissPairings() has to award a bye when there is an odd number\"\n \"of players.\"\n )\n print \"2. 
Bye is not given to a player who already has a bye.\"", "def player_show_hand(self):\n for card in self.get_hand():\n print(card.get_card())", "def highCard(p1name, p2name, p1hand, p2hand):\n\tplayer1 = list(p1hand)\n\tplayer2 = list(p2hand)\n\n\tif player1[0] == \"A\" or player1[1] == \"A\":\n\t\tprint(\"%s wins!\" % p1name)", "def hidden():\n return False", "def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True", "def dealer_play(play_shoe, dealer):\n dealer.reveal_hidden_card()\n print(\"Now for the dealer\")\n print(\"Dealer reveals: \" + dealer.get_hidden_card_only().get_card_details())\n print(\"Dealer's hand:\" + dealer.get_viewable_hand())\n decide_soft_score_print(dealer)\n if dealer.get_score() < 17:\n hit = True\n while hit:\n print(\"Dealer hits\")\n dealer.hit_hand(play_shoe)\n print(\"Dealer's hand \" + dealer.get_viewable_hand())\n decide_soft_score_print(dealer)\n hit = dealer.check_hit(play_shoe)\n check_stand(dealer)", "def compare_hands(self):\r\n\r\n # Slows down the pace of the game with pauses\r\n self.loading(0.25)\r\n\r\n # If the round ends in a tie, the try_again will be set to true so that the program knows\r\n # to restart the round without incrementing the round number or changing the win/lose record\r\n if (self.player_rock is True and self.opp_rock is True) or (\r\n self.player_paper is True and self.opp_paper is True) or (\r\n self.player_scissors is True and self.opp_scissors is True):\r\n\r\n self.try_again = True\r\n\r\n self.player_tie()\r\n\r\n else:\r\n\r\n # If there is no draw, then the code proceeds to determine the winner and the loser.\r\n self.try_again = False\r\n\r\n if self.player_rock is True and self.opp_scissors is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_rock is True and self.opp_paper is True:\r\n\r\n self.player_lose()\r\n\r\n elif self.player_paper is True and self.opp_rock is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_paper is True and self.opp_scissors is True:\r\n\r\n self.player_lose()\r\n\r\n elif self.player_scissors is True and self.opp_paper is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_scissors is True and self. opp_rock is True:\r\n\r\n self.player_lose()\r\n\r\n # Clear the summary entry box\r\n self.summary_entry.delete(0, \"end\")\r\n\r\n # Insert a new value which lets the player know if they won that round\r\n self.summary_entry.insert(0, self.summary)", "def test_discard(self):\r\n deck_size = 3\r\n d = Deck(deck_size)\r\n for _ in range(deck_size):\r\n d.draw()\r\n d.discard([1, 3])\r\n drawn = d.draw(2)\r\n self.assertEqual(len(drawn), 2)\r\n self.assertIn(1, drawn)\r\n self.assertIn(3, drawn)", "def test_hand_is_flush(hand, result):\n from poker_rankings import PokerHand\n heros_hand = PokerHand(hand)\n assert heros_hand._is_flush == result", "def test_when_opponent_all_Ds(self):\n self.responses_test([C, C, C, C], [D, D, D, D], [D, D, D], random_seed=5)", "def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. 
'))\n return chosen_to_discard", "def blind_bet(self):\n self.this_player.bet(SMALL_BLIND_BET)\n self.other_player.bet(BIG_BLIND_BET)\n if SHOW_MESSAGE:\n print(\"Making blind bets.\")\n print(\"Player1:\")\n self.player1.show()\n print(\"Player2:\")\n self.player2.show()", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def draw_hands(n_players=1):\n if n_players > 6:\n assert \"too many players. someone can't play.\"\n\n deck = make_deck()\n\n random.shuffle(deck)\n\n hands = []\n\n for i in range(n_players):\n hands.append(deck[15*i:15*(i+1)])\n\n bag = deck[n_players*15:]\n\n return hands, bag", "def house_deal(self) -> None:\n if not self.has_game_ending_hand:\n while max(self.house.hand.value) < 17:\n print(f\"{self.deal_card(self.house)}\")", "def gameOver(self):\n\t\treturn self.lives == 0", "def test_habits_without_trackings():\n untracked_habits = analytics.habits_without_trackings(analytics.habits_table,\n [])\n assert untracked_habits == []", "def is_hidden():\n return False", "def is_hidden():\n return False", "def is_card_in_other_hands(self, own_hand_index, card):\n for i, hand in enumerate(self.hands):\n if i == own_hand_index:\n continue\n if card in hand:\n return True\n return False", "def user_play(play_shoe, player, dealer):\n print(\"\\nDealer shows:\" + dealer.get_viewable_hand())\n hit = True\n while hit == True:\n decision = \" \"\n if len(player.get_hand()) == 2:\n print(\"\\nPlayer \" + player.get_name() + \" your hand is:\" + player.get_viewable_hand())\n else:\n print(\"\\nYour hand is now:\" + str(player.get_viewable_hand()))\n decide_soft_score_print(player)\n if not(check_blackjack(player.get_score(), player.get_hand())):\n if not(player.check_bust()) and player.get_score() < 21:\n while not(decision[0] == \"h\") and not(decision[0] == \"s\"):\n decision = input(\"Would you like to Hit or Stand? 
\").lower()\n if decision[0]==\"h\":\n player.hit_hand(play_shoe)\n else:\n hit = False\n else:\n hit = False\n else:\n hit = False\n check_stand(player)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "async def test_turn_off(opp):\n await common.async_set_hvac_mode(opp, HVAC_MODE_HEAT, ENTITY_CLIMATE)\n state = opp.states.get(ENTITY_CLIMATE)\n assert state.state == HVAC_MODE_HEAT\n\n await common.async_turn_off(opp, ENTITY_CLIMATE)\n state = opp.states.get(ENTITY_CLIMATE)\n assert state.state == HVAC_MODE_OFF", "def action_house_reveal(self) -> None:\n self.house.hand.reveal_hand()\n print(f\"\\nThe house reveals their hand containing: {self.house.hand}, totalling to {self.house.hand.value}\")", "def head_surprised():\n print (hair_spiky())\n print (eye_wide())\n print (nose_leftwards())\n print (mouth_open())\n print (chin_combo())", "def is_summon(self):\n return False", "def is_round_over(whose_turn,players):\n if ((len(players[whose_turn].hand.cards) == 0) and (players[whose_turn].has_discarded == True)):\n round_over = True\n else:\n round_over = False\n return round_over", "def test_like_unlike_game(self):\n url = reverse('like-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id\n }\n\n like = self.client.post(url, data, format='json')\n self.assertEqual(True, like.data['value'])\n\n unlike = self.client.post(url, data, format='json')\n self.assertEqual(False, unlike.data['value'])", "def test_actionWithNoTargetInDarkRoom(self):\n self._test(\n \"wear pants\",\n [\"It's too dark to see.\"], # to dark to see... the pants? 
any pants?\n [])", "def conclude_hand(self):\n for position in self.positions.keys():\n if position not in self.cards:\n self.cards[position] = (Card(), Card())", "def test_for_dealing_card():\n deck1 = Shoe()\n deck1.deal_card()\n assert len(deck1.deck) == 51", "def testHand():\n\n failure = False\n print(\"\\n ---------- Test Hand ---------\")\n\n h = Hand(8, {'a':3, 'b':2, 'd':3})\n\n print(\"Mano actual:\", h)\n h.update('bad')\n print(\"Palabra dada: bad\")\n print(\"Mano actual:\", h)\n\n if h.containsLetters('aabdd') and not h.isEmpty():\n failure = True\n else:\n failure = False\n print(\"FAILURE: Debería estar la letras 'aabdd' y además no estar vacío\")\n\n h.update('dad')\n print(\"Palabra dada: dad\")\n print(\"Mano actual:\", h)\n if h.containsLetters('ab') and not h.isEmpty():\n failure = True\n else:\n failure = False\n print(\"FAILURE: Debería estar la letras 'aabdd' y además no estar vacío\")\n\n h.update('ab')\n print(\"Palabra dada: ab\")\n print(\"Mano actual:\", h)\n\n if h.isEmpty():\n failure = True\n else:\n failure = False\n print(\"FAILURE: Debería estar vacío\")\n\n print(\"Comparación de jugadas: \")\n print(\"h = Hand(8, {'a':3, 'b':2, 'd':3})\")\n h = Hand(8, {'a':3, 'b':2, 'd':3})\n print(\"g = Hand(8, {'a':3, 'b':2, 'd':3})\")\n g = Hand(8, {'a':3, 'b':2, 'd':3})\n print(\"j = Hand(8, {'a':3, 'b':2, 'd':3})\")\n j = Hand(7, {'a':2, 't':2, 'p':3})\n print(\"¿h = g?\", h == g)\n print(\"¿h,g = j?\", h == j or g == j)\n\n if failure:\n print(\"SUCCESS: testHand()\")\n else:\n print(\"FAILURE: testHand()\")", "def test_when_opponent_all_Ds(self):\n self.responses_test([D, D, D, D], [D, D, D, D], [D, D, D],\n random_seed=5)", "def hand_empty(self):\n return len(self.cards) == 0", "def assertDarkRoom(self, visible):\n descr = visible.visualizeWithContents([])\n expressed = descr.plaintext(self.observer)\n lines = commandutils.flatten(expressed).splitlines()\n\n self.assertEquals(\n lines,\n [u\"[ Blackness ]\",\n u\"You cannot see anything because it is very dark.\"])", "def is_unhappy(self):\n #checked!#\n ###your code here###\n same=0\n for i in self.home.neighbors:\n if i.occupant!=None:\n if i.occupant.group==self.group:\n same+=1\n happniess=float(same)/len(self.home.neighbors)\n if happniess<self.happiness_threshold:\n return True\n else:\n return False", "def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The house has blackjack\")\n self.event_house_wins()", "def hit(hand=bj.player1.hand):\r\n hand.append(bj.deck.remove_card())", "def discard(self):\n # Would need more logic...\n # Should be specified, won't use in 'war'\n return self.hand.pop()", "def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! 
Dealer wins.\")", "def hand():\n return PokerHand()", "def test_visible_data(state):\n st_data = state.to_player_data(0)\n\n assert st_data, \"Expect that we would have some data!\"\n assert \"deck\" not in st_data, \"We should not see the deck\"\n assert len(st_data[\"discarded\"]) == 0, \"We should see discarded\"\n\n # Should see all data of the player self\n assert len(st_data[\"self\"][\"hand\"]) == 0\n\n # Should not see other player's data\n other_hand = st_data[\"others\"][0]\n assert \"hand\" not in other_hand\n assert len(other_hand[\"open_hand\"]) == 0", "def attemptBearOff(self, point):\r\n \r\n # Creates the values to be compared to dice numbers for both teams\r\n for num in self.diceNumbers:\r\n if self.getTurn() == RED:\r\n compare = 24 - point.getNumber()\r\n else:\r\n compare = point.getNumber() + 1\r\n \r\n # If the dice number is sufficiently large enough, the point's \r\n # checker piece leaves the board for good and is placed in the \r\n # checkerBox, indicating that it is no longer in play\r\n if num >= compare:\r\n self.checkerBox.addChecker(point.returnChecker())\r\n point.removeChecker()\r\n point.organize()\r\n point.update()\r\n point.setActiveTurn()\r\n self.diceNumbers.remove(num)\r\n self.drawBoard()\r\n \r\n # Checks whether the current player has won the game yet\r\n self.checkWinner(self.surface)\r\n \r\n # Changes teams if all the dice numbers are used up\r\n if not len(self.diceNumbers) and not self.isGameWon():\r\n self.changeTurn()\r\n \r\n return True #This value indicates that bear off was successful\r\n \r\n return False", "def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()", "def show_hand(self):\n for card in self.hand:\n print(card)", "def print_hands(self):\n # Clear the terminal and reprint round header\n os.system(\"clear\")\n self.print_header\n\n # Only display one of the dealers cards if they are still playing\n if not self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n print(\"UNKNOWN\")\n for card in self.dealer.cards:\n if card != self.dealer.cards[0]:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\"*25)\n print(\"TOTAL = ?\")\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n\n # Display the players cards and all of the dealers cards\n elif self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n for card in self.dealer.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.dealer.sum_cards()))\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n pass", "def test_set_hidden(self):\n self.test_object.set_hidden(False)\n self.assertFalse(self.test_object.get_hidden())" ]
[ "0.66469085", "0.66251075", "0.661568", "0.6462299", "0.63435477", "0.625224", "0.6242946", "0.62145275", "0.61973673", "0.6194856", "0.61828953", "0.6082449", "0.60560155", "0.6051513", "0.59985155", "0.5985109", "0.5983806", "0.5977845", "0.5970393", "0.5968302", "0.5965452", "0.59233266", "0.5873228", "0.58692884", "0.58462363", "0.5836998", "0.5820925", "0.580706", "0.5802923", "0.57816917", "0.5779972", "0.5755657", "0.5752481", "0.5751016", "0.5747215", "0.5734012", "0.5723737", "0.5716645", "0.57079417", "0.57073694", "0.56997997", "0.5692467", "0.5692288", "0.5679407", "0.5674787", "0.56579083", "0.563123", "0.56207526", "0.56172967", "0.5611884", "0.55935246", "0.55626357", "0.55603117", "0.555348", "0.55521244", "0.55508757", "0.5549598", "0.55483526", "0.5522311", "0.5514843", "0.55140585", "0.5491589", "0.5487176", "0.5484477", "0.5479784", "0.54730153", "0.54709905", "0.5460197", "0.5458467", "0.5453481", "0.5453481", "0.5450505", "0.54360294", "0.5434317", "0.5429355", "0.5428584", "0.54253453", "0.54223293", "0.54204905", "0.54187775", "0.5417261", "0.5412411", "0.5404797", "0.53997403", "0.53932595", "0.53895795", "0.53889143", "0.53877705", "0.5384368", "0.5382987", "0.53735393", "0.5372213", "0.5366789", "0.5365146", "0.53644365", "0.53626484", "0.5360933", "0.5350491", "0.5346982", "0.53437954" ]
0.69971275
0
Test hiding all vaults.
def test_privatize_vaults(self):
    g = Game()
    g.add_player(uuid4(), 'p0')
    g.add_player(uuid4(), 'p1')
    gs = g
    p0, p1 = gs.players

    latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])
    p0.vault.set_content([latrine, insula])
    p1.vault.set_content([statue, road])

    gs_private = g.privatized_game_state_copy('p1')
    p0, p1 = gs_private.players

    self.assertEqual(p0.vault, Zone([Card(-1)]*2, name='vault'))
    self.assertEqual(p1.vault, Zone([Card(-1)]*2, name='vault'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_yggdrasil_vaults(self):\n pass", "def test_get_asgard_vaults(self):\n pass", "def test_vault_get_all_vault_items(self):\n pass", "def test_vault_get_all_vault_sections(self):\n pass", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def vault(self):", "async def test_denylist(hass: HomeAssistant, mock_client) -> None:\n await _setup(\n hass,\n {\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"test.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"light.included\", True),\n FilterTest(\"test.excluded_test\", False),\n FilterTest(\"test.included_test\", True),\n FilterTest(\"not_real.included\", True),\n FilterTest(\"not_real.excluded\", False),\n ]\n\n for test in tests:\n hass.states.async_set(test.id, \"not blank\")\n await hass.async_block_till_done()\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def test_vault_get_vault_item(self):\n pass", "def test_not_authenticated_non_public_course_with_all_blocks(self):\n self.client.logout()\n self.query_params.pop('username')\n self.query_params['all_blocks'] = True\n self.verify_response(403)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_not_authenticated_public_course_with_all_blocks(self):\n self.client.logout()\n self.query_params.pop('username')\n self.query_params['all_blocks'] = True\n self.verify_response(403)", "def unseal_vault(self):\n root = self.get_tokens(token_type='root_token')\n if self.check_seal():\n self.logger(\"info\", \"Vault is sealed...Unlocking now...\")\n unlock_keys = self.get_tokens(token_type='keys')\n self.client(self.hostname, token=root).unseal_multi(unlock_keys)\n self.logger(\"info\", \"Unsealed vault\")\n else:\n self.logger(\"info\", \"There are no sealed vaults\")", "def test_vault_delete_vault_section(self):\n pass", "async def test_filtered_denylist(hass: HomeAssistant, mock_client) -> None:\n await _setup(\n hass,\n {\n \"include_entities\": [\"fake.included\", \"test.excluded_test\"],\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"*.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"fake.included\", True),\n FilterTest(\"alt_fake.excluded_test\", False),\n FilterTest(\"test.excluded_test\", True),\n FilterTest(\"not_real.excluded\", False),\n FilterTest(\"not_real.included\", True),\n ]\n\n for test in tests:\n hass.states.async_set(test.id, \"not blank\")\n await hass.async_block_till_done()\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def test_verification_status_invisible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_off('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_off('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_off('audit', 'You\\'re auditing this course')", "def test_vault_delete_vault_item(self):\n pass", "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def test_empty_private(self):\n self.do_visible(True, None, False, 
is_admin=True)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_disable_virt_realm_remote_access(self):\n pass", "def test_vault_get_vault_section(self):\n pass", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_is_not_hidden(self) -> None:\n path = \"home\"\n result = is_hidden(path)\n self.assertFalse(result)", "def test_switch_hidden1(self):\n self.test_object.switch_hidden()\n self.assertFalse(self.test_object.get_hidden())", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def unlock_vault():\n print()\n password_vault = [\n {\n 'type': 'password',\n 'message': 'Enter your vault password:',\n 'name': 'password',\n 'validate': NotEmpty\n }\n ]\n password_answer = prompt(password_vault, style=style)\n passwd = password_answer[\"password\"]\n v = vault.unlock(passwd)\n if v == False:\n unlock_vault()", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def any_public_tests(self):\n return any([not t.hidden for t in self.tests])", "def audit():\n governance = web3.ens.resolve('ychad.eth')\n registry = load_registry()\n vaults = load_vaults(registry)\n for v in vaults:\n if v.vault.governance() != governance:\n secho(f'{v.name} vault governance == {v.vault.governance()}', fg='red')\n print(f'{v.vault}.setGovernance({governance})')\n if v.strategy.governance() != governance:\n secho(f'{v.name} strategy governance == {v.strategy.governance()}', fg='red')\n print(f'{v.strategy}.setGovernance({governance})')", "def test_mountain_ocean_is_not_accessible():\n mountain = topo.Mountain()\n ocean = topo.Ocean()\n assert mountain.is_accessible is False\n assert ocean.is_accessible is False", "def check_for_private_vul(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n if v[\"is_private\"]:\n return\n raise Exception(\"No private vulnerability found\")", "def test_is_hidden(self) -> None:\n path = \".ssh\"\n result = is_hidden(path)\n self.assertTrue(result)", "def test_partial_deck_doesnt_have_ignored_cards(self):\n self.assertEqual(self.ignoredCardPresent, False)", "def test_tenant_secret_page_on_root_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.tenant_root_domain)\n self.assertEqual(response.status_code, 
403)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "async def test_denylist(hass, mock_client):\n handler_method = await _setup(\n hass,\n {\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"test.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"light.included\", True),\n FilterTest(\"test.excluded_test\", False),\n FilterTest(\"test.included_test\", True),\n FilterTest(\"not_real.included\", True),\n FilterTest(\"not_real.excluded\", False),\n ]\n\n for test in tests:\n event = make_event(test.id)\n handler_method(event)\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_list_virtualization_realm_templates(self):\n pass", "def test_non_trialing(self):\n account = AccountFactory(status=Account.AccountStatus.ACTIVE)\n request = self.rf.get(\"/\")\n request.account = account\n context = {\"request\": request}\n\n context = accounts_tags.trial_banner(context)\n\n assert not context[\"display_banner\"]", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def test_get_hidden(self):\n self.assertTrue(self.test_object.get_hidden())", "def test_no_list_listings(self):\n pool_name = p_n()\n fs_name = fs_n()\n StratisCli.pool_create(pool_name, block_devices=DISKS)\n StratisCli.fs_create(pool_name, fs_name)\n\n self.assertEqual(StratisCli.pool_list(), StratisCli.pool_list(False))\n self.assertEqual(StratisCli.fs_list(), StratisCli.fs_list(False))\n self.assertEqual(StratisCli.blockdev_list(),\n StratisCli.blockdev_list(False))", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_whitelist_zero_access_policies(self):\n p = self.load_policy({\n 'name': 'test-key-vault',\n 'resource': 'azure.keyvault',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'glob',\n 'value_type': 'normalize',\n 'value': 'cckeyvault2*'},\n {'not': [\n {'type': 'whitelist',\n 'key': 'principalName',\n 'users': ['account1@sample.com']}\n ]}\n ]\n })\n resources = p.run()\n self.assertEqual(len(resources), 0)", "def test_volumes_get(self):\n pass", "def test_switch_hidden2(self):\n self.test_object.switch_hidden()\n self.test_object.switch_hidden()\n self.assertTrue(self.test_object.get_hidden())", "def test_get_all_accessible_by_hash_list_as_user_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n 
templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user2_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_get_all_accessible_by_hash_as_staff_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_get_all_accessible_by_hash_list_as_staff_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user2_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def not_test_without_user(self):\n # TODO", "def test_vault_update_vault_section(self):\n pass", "def test_lta_good(self):\n self.assertIsNone(api.inventory.check(self.lta_order_good))", "def test_extra_field_when_not_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n assert 'other_course_settings' not in block_data\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )", "def hide_potentially_fake_levels(self, hide):\n return self._toggle_filter(Filters.PotentiallyFake, hide)", "def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())", "def test_all_hidden(self):\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 200)\r\n obj = json.loads(response.content)\r\n self.assertTrue(obj['success'])", "def test_habits_without_trackings():\n untracked_habits = analytics.habits_without_trackings(analytics.habits_table,\n [])\n assert untracked_habits == []", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' 
in response.data.get('detail'))", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_get_all_accessible_by_hash_as_user_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_visible_blacklisted(self):\n\n self.feature_test.set_percentage(100)\n self.feature_test.add_to_blacklist(3)\n self.assertFalse(self.feature_test.is_visible(3))", "def test_remove_virt_realm(self):\n pass", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def prune(self, vault_client):\n existing = getattr(vault_client,\n SecretBackend.list_fun)()['data'].items()\n for mount_name, _values in existing:\n # ignore system paths and cubbyhole\n mount_path = normalize_vault_path(mount_name)\n if mount_path.startswith('sys') or mount_path == 'cubbyhole':\n continue\n\n exists = [resource.path\n for resource in self.mounts()\n if normalize_vault_path(resource.path) == mount_path]\n\n if not exists:\n LOG.info(\"removed unknown mount %s\", mount_path)\n getattr(vault_client, SecretBackend.unmount_fun)(mount_path)", "async def test_filtered_denylist(hass, mock_client):\n handler_method = await _setup(\n hass,\n {\n \"include_entities\": [\"fake.included\", \"test.excluded_test\"],\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"*.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"fake.included\", True),\n FilterTest(\"alt_fake.excluded_test\", False),\n FilterTest(\"test.excluded_test\", True),\n FilterTest(\"not_real.excluded\", False),\n FilterTest(\"not_real.included\", True),\n ]\n\n for test in tests:\n event = make_event(test.id)\n handler_method(event)\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def test_list_auth(self):\n self.api_client.logout()\n resp = self.api_client.get('/api/metadata/tracks/')\n self.assertEqual(resp.status_code, 403)", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_systemtype_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/systemtype/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def 
test_list_zones_ignore_access_default_false(shared_zone_test_context):\n result = shared_zone_test_context.list_zones_client.list_zones(status=200)\n assert_that(result[\"ignoreAccess\"], is_(False))", "def test_need_login_to_see_meterlist(self):\n response = self.client.get(reverse('api_v1:meter-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_get_dealer_active_inventory(self):\n pass", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_systemtype_detail_api_unauthorized (self):\n\n # get object\n systemtype_api_1 = Systemtype.objects.get(systemtype_name='systemtype_api_1')\n # get response\n response = self.client.get('/api/systemtype/' + str(systemtype_api_1.systemtype_id) + '/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_list_zones_ignore_access_success(shared_zone_test_context):\n result = shared_zone_test_context.list_zones_client.list_zones(ignore_access=True, status=200)\n retrieved = result[\"zones\"]\n\n assert_that(result[\"ignoreAccess\"], is_(True))\n assert_that(len(retrieved), greater_than(5))", "def test_list_zones_no_authorization(shared_zone_test_context):\n shared_zone_test_context.list_zones_client.list_zones(sign_request=False, status=401)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_orphan(self, mock_rm_vopts):\n def validate_rm_vopts(vgwrap, vopts, **kwargs):\n # Two of the VSCSI mappings have storage; both are vopts\n self.assertEqual(2, len(vopts))\n mock_rm_vopts.side_effect = validate_rm_vopts\n vwrap = self.vio_feed[0]\n # Save the \"before\" sizes of the mapping lists\n vscsi_len = len(vwrap.scsi_mappings)\n vfc_len = len(vwrap.vfc_mappings)\n ts.add_orphan_storage_scrub_tasks(self.ftsk)\n ret = self.ftsk.execute()\n # One for vscsi maps, one for vfc maps, one for vopt storage\n self.assertEqual(3, self.logfx.patchers['warn'].mock.call_count)\n # Pull out the WrapperTask returns from the (one) VIOS\n wtr = ret['wrapper_task_rets'].popitem()[1]\n vscsi_removals = wtr['vscsi_removals_orphans']\n self.assertEqual(18, len(vscsi_removals))\n # Removals are really orphans\n for srm in vscsi_removals:\n self.assertIsNone(srm.client_adapter)\n # The right number of maps remain.\n self.assertEqual(vscsi_len - 18, len(vwrap.scsi_mappings))\n # Remaining maps are not orphans.\n for smp in vwrap.scsi_mappings:\n self.assertIsNotNone(smp.client_adapter)\n # _RemoveOrphanVfcMaps doesn't \"provide\", so the following are limited.\n # The right number of maps remain.\n self.assertEqual(vfc_len - 19, len(vwrap.vfc_mappings))\n # Remaining maps are not orphans.\n for fmp in vwrap.vfc_mappings:\n self.assertIsNotNone(fmp.client_adapter)\n # POST was warranted.\n self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)\n # _RemoveStorage invoked _rm_vopts\n self.assertEqual(1, mock_rm_vopts.call_count)", "def test_public_course_all_blocks_and_empty_username(self):\n self.query_params['username'] = ''\n self.query_params['all_blocks'] = True\n # Verify response for a regular user.\n self.verify_response(403, cacheable=False)\n # Verify response for an unenrolled user.\n CourseEnrollment.unenroll(self.user, self.course_key)\n self.verify_response(403, cacheable=False)\n # Verify response for an anonymous user.\n self.client.logout()\n self.verify_response(403, cacheable=False)\n # Verify response for a 
staff user.\n self.client.login(username=self.admin_user.username, password='test')\n self.verify_response(cacheable=False)", "def test_unlocked_asset(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_unlocked)\r\n self.assertEqual(resp.status_code, 200) # pylint: disable=E1103\r", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert message in result.output", "def list_vaults(max_vaults=10, iter_marker=None):\n\n # Retrieve vaults\n glacier = boto3.client('glacier')\n if iter_marker is None:\n vaults = glacier.list_vaults(limit=str(max_vaults))\n else:\n vaults = glacier.list_vaults(limit=str(max_vaults), marker=iter_marker)\n marker = vaults.get('Marker') # None if no more vaults to retrieve\n return vaults['VaultList'], marker", "def _unfiled_box():\n return db.box((db.box.name == 'Unfiled') & (db.box.owner == auth.user.id))", "def test_disabled_method(api_client):\n\n response = api_client().get(\"/anything/disabled_method\")\n assert response.status_code == 403", "def test_no_unlisted(self):\n Version.objects.get(pk=self.version_1_2_2).update(\n channel=amo.RELEASE_CHANNEL_UNLISTED)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def canUnlockAll(boxes):\n res = []\n res.append(0)\n if boxes[0] == []:\n return False\n for i in range(len(boxes)):\n for j in boxes[i]:\n if j not in res and j < len(boxes) and j != i:\n res.append(j)\n if len(res) != len(boxes):\n return False\n return True", "def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior({\n \"cache_control\": False,\n \"cache_control_public\": True,\n \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_vault_create_authorization_for_vault_section(self):\n pass", "def test_list_containers_with_non_authorized_user(self):\n\n test_auth_provider = self.os_operator.auth_provider\n # Get auth for the test user\n test_auth_provider.auth_data\n\n # Get fresh auth for test user and set it to next auth request for\n # account_client\n delattr(test_auth_provider, 'auth_data')\n test_auth_new_data = test_auth_provider.auth_data\n self.account_client.auth_provider.set_alt_auth_data(\n request_part='headers',\n auth_data=test_auth_new_data\n )\n\n params = {'format': 'json'}\n # list containers with non-authorized user token\n self.assertRaises(lib_exc.Forbidden,\n self.account_client.list_account_containers,\n params=params)", "def test_page_list_unauthorised(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n 
self.assertEqual(len(response.data), 2)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2})", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def canUnlockAll(boxes):\n for key in range(1, len(boxes) - 1):\n res = False\n for index in range(len(boxes)):\n res = key in boxes[index] and key != index\n if res:\n break\n if res is False:\n return res\n return True" ]
[ "0.73062766", "0.6963295", "0.6804855", "0.64582133", "0.63372976", "0.6049422", "0.59896415", "0.59509873", "0.5787047", "0.57756597", "0.5770837", "0.57636744", "0.57540184", "0.5746007", "0.5736347", "0.5734542", "0.5626461", "0.5625377", "0.5586302", "0.55808043", "0.5574537", "0.5540431", "0.55387664", "0.5501347", "0.547374", "0.54698306", "0.5449806", "0.5441284", "0.54388696", "0.53972435", "0.53936845", "0.5383247", "0.5373023", "0.5357938", "0.53554726", "0.5342284", "0.53339654", "0.53334177", "0.53277755", "0.5324785", "0.53242505", "0.53235734", "0.53186035", "0.53150636", "0.5313612", "0.5310187", "0.53100586", "0.5276593", "0.5275006", "0.5273324", "0.5272451", "0.5265874", "0.52548945", "0.52546114", "0.5249361", "0.52462", "0.524398", "0.5242547", "0.52398765", "0.5237972", "0.52187777", "0.5217836", "0.52106404", "0.5206746", "0.5194321", "0.51916075", "0.51907754", "0.5159625", "0.5156042", "0.5149634", "0.5146811", "0.5145288", "0.51411426", "0.51410943", "0.51347256", "0.51277846", "0.51137006", "0.5111561", "0.5105849", "0.51015437", "0.50946456", "0.5092201", "0.5087413", "0.5083859", "0.5079295", "0.50775194", "0.507717", "0.50768995", "0.5076449", "0.50704575", "0.5069802", "0.50649047", "0.5060829", "0.5057021", "0.5053361", "0.50523543", "0.50456095", "0.5043558", "0.503361", "0.503321" ]
0.6935641
2
Test hiding the card revealed with the fountain.
def test_privatize_fountain_card(self):
    g = Game()
    g.add_player(uuid4(), 'p0')
    g.add_player(uuid4(), 'p1')

    gs = g
    p0, p1 = gs.players

    latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])
    p0.fountain_card = latrine

    gs_private = g.privatized_game_state_copy('p1')
    p0, p1 = gs_private.players

    self.assertEqual(p0.fountain_card, Card(-1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def hide_card(self):\n try:\n self.hidden_card_value = self.hand[1]\n self.hand[1] = Card()\n except IndexError:\n print('The dealer does not have enough cards!')", "def test_switch_hidden1(self):\n self.test_object.switch_hidden()\n self.assertFalse(self.test_object.get_hidden())", "def test_switch_hidden2(self):\n self.test_object.switch_hidden()\n self.test_object.switch_hidden()\n self.assertTrue(self.test_object.get_hidden())", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "def reveal_top_card(self):\n if self.get_length() != 0:\n if not self.get_topmost_card().get_exposed():\n self.get_topmost_card().flip_card()", "def reveal_card(self):\n self.hand[1] = self.hidden_card_value\n self.hidden_card_value = Card()", "def test_get_css_indicator_hidden(\n self,\n display_correctness,\n result,\n ):\n self.xblock.display_correctness = display_correctness\n test_result = self.xblock.get_css_indicator_hidden()\n self.assertEquals(result, test_result)", "def show_card(self):\n return self.hands.show(0)", "def test_partial_deck_doesnt_have_ignored_cards(self):\n self.assertEqual(self.ignoredCardPresent, False)", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def test_get_hidden(self):\n self.assertTrue(self.test_object.get_hidden())", "def test_card_show(mock_card, capsys):\n mock_card.show()\n captured = capsys.readouterr()\n assert captured.out == \"SPADE, 1\\n\"", "def is_hidden():\n return False", "def is_hidden():\n return False", "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if 
self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False", "def hidden():\n return False", "def test_set_hidden(self):\n self.test_object.set_hidden(False)\n self.assertFalse(self.test_object.get_hidden())", "def test_visible_blacklisted(self):\n\n self.feature_test.set_percentage(100)\n self.feature_test.add_to_blacklist(3)\n self.assertFalse(self.feature_test.is_visible(3))", "def test_is_not_hidden(self) -> None:\n path = \"home\"\n result = is_hidden(path)\n self.assertFalse(result)", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def test_play_nobane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\")\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertIn(self.g[self.g._bane].cost, (2, 3))\n self.assertEqual(self.attacker.piles[Piles.HAND].size(), 5 + 2 - 2)\n self.assertIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def test_play(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.favors.get(), 1)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1 + 1)", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def test_show_correctness_never(self, has_staff_access):\n assert not ShowCorrectness.correctness_available(show_correctness=ShowCorrectness.NEVER,\n has_staff_access=has_staff_access)", "def hide_button_hit(spr, x, y):\n _red, _green, _blue, _alpha = spr.get_pixel((x, y))\n if _red == HIT_HIDE:\n return True\n else:\n return False", "def test_card_suit(mock_card):\n assert mock_card.suit == Suit.SPADE", "def hide_potentially_fake_levels(self, hide):\n return self._toggle_filter(Filters.PotentiallyFake, hide)", "def test_get_hidden_on_interdiff(self) -> None:\n self.assertFalse(self.action.get_visible(\n context=self._create_request_context(url_name='view-interdiff')))", "def should_show():", "def is_hidden(self):\n return self.has_label(HIDDEN_LABEL)", "def hide(self):\n self.course.quick_action(self.id, 'hide')", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n 
hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def test_default_hidden_not_in_attributes(self):\n self.assertNotIn(\n ATTR_HIDDEN,\n self.hass.states.get(self.entity.entity_id).attributes)", "def test_is_hidden(self) -> None:\n path = \".ssh\"\n result = is_hidden(path)\n self.assertTrue(result)", "def hide_fake_levels(self, hide):\n return self._toggle_filter(Filters.Fake, hide)", "def test_play_bane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\", self.g._bane)\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertNotIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def hide(self, item_id):\n pass", "def hide_correctness(self):\n self.hiddens.add('correct')\n self.hiddens.add('score')", "def _test_display_down_button(self):\n return (self.product_displays.top_index + self.limits.screen_products) < len(self.product_displays)", "def flip(self, pile: int) -> bool:\n top_card = self._piles[pile].top_card\n if top_card is not None and not top_card.visible:\n top_card.visible = True\n return True\n return False", "def test_discard(self):\r\n deck_size = 3\r\n d = Deck(deck_size)\r\n for _ in range(deck_size):\r\n d.draw()\r\n d.discard([1, 3])\r\n drawn = d.draw(2)\r\n self.assertEqual(len(drawn), 2)\r\n self.assertIn(1, drawn)\r\n self.assertIn(3, drawn)", "def test_for_dealing_card():\n deck1 = Shoe()\n deck1.deal_card()\n assert len(deck1.deck) == 51", "def showdown(self):\n print \"%s: %s\" %(self.name, repr(self.cards)) # open dealer's cards\n for player in self.game.players:\n win = self.balance(player)\n if win > 0: \n print player.name, 'wins', win\n elif win == 0: \n print player.name, 'draws'\n elif win <0:\n print player.name, 'loses', -(win) \n self.budget -= win\n player.budget += win\n print 'budget of %s : %s'%(player.name,player.budget)\n print 'budget of %s : %s'%(self.name,self.budget)", "def do_hf_hide(self, arg):\n self.show_hidden_frames = False\n self.refresh_stack()", "def test_hiding_demo_state(self):\n demo.setup(self.hass, {demo.DOMAIN: {'hide_demo_state': 1}})\n\n self.assertIsNone(self.hass.states.get('a.Demo_Mode'))", "def hidden(self) -> bool:\n return False", "def hidden(self) -> bool:\n return False", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def hide(self):\n self.visible = False", "def Hide(self):\r\n \r\n return self.SetFlag(self.optionHidden, True)", "def ensure_hidden(self):\n self.set_visible(False)", "def cardDead(self, card):\n return card.getValue() <= self.field[Suit.toInt(card.getSuit()) - 1]", "def unhide(self):\n self.course.quick_action(self.id, 'show')", "def test_hiddenpart(self):\n testfile='hiddenpart.eml'\n try:\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='hidden', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = 'recipient-hiddenpart@unittests.fuglu.org'\n conffile = self.tempdir + \"/%s-filetypes.conf\" % user\n # the largefile in the test message is just a bunch of zeroes\n open(conffile, 'w').write(\n \"deny application\\/zip no zips allowed\")\n self.rulescache._loadrules()\n suspect = Suspect(\n 'sender@unittests.fuglu.org', user, tmpfile.name)\n\n result = 
self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'hidden message part was not detected')\n\n finally:\n tmpfile.close()\n os.remove(conffile)", "def test_get_hidden_on_diff_viewer(self) -> None:\n self.assertTrue(self.action.get_visible(\n context=self._create_request_context(url_name='view-diff')))", "def test_play(self):\n self.plr.piles[Piles.DECK].set(\"Province\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.test_input = [\"keep\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.assertIn(\"Province\", self.plr.piles[Piles.DECK])\n self.assertNotIn(\"Province\", self.plr.piles[Piles.DISCARD])", "def test_bonus_disc_no_bonus(self):\r\n gm_discs = set([1, 2])\r\n sp_discs = set([1, 2])\r\n self.assertFalse(gmspotify._bonus_disc_added(gm_discs, sp_discs))", "def hide(self):\n super(SkillShotScreen, self).hide()\n \n if self.cube != None: self.cube.clearLight(self.dlnp)\n if self.cubeMovement: self.cubeMovement.finish()\n if self.cubeRotation: self.cubeRotation.finish()\n if self.awardMovement: self.awardMovement.finish()\n base.taskMgr.remove('award_advance')\n if self._awardText != None: self._awardText.hide()", "def test_get_hidden_on_review_request(self) -> None:\n self.assertTrue(self.action.get_visible(\n context=self._create_request_context(\n url_name='review-request-detail')))", "def test_should_render_with_discarded(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(\n status=ReviewRequest.DISCARDED)))", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def testDiscTender(self):\n self.setupTransaction()\n if not checkout.pay_card(card_name='Discover'):\n tc_fail(\"Failed to pay with Discover credit tender\")\n self.handleMessages()", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! 
You win!\")", "def do_hf_unhide(self, arg):\n self.show_hidden_frames = True\n self.refresh_stack()", "def test_show_correctness_default(self):\n assert ShowCorrectness.correctness_available()", "def test_play_card(self):\n while True:\n card = self.g[\"Clashes\"].remove()\n if card.name == \"Battle Plan\":\n break\n self.plr.piles[Piles.DECK].set(\"Gold\")\n self.plr.piles[Piles.HAND].set(\"Estate\", \"Militia\")\n self.plr.add_card(card, Piles.HAND)\n self.plr.test_input = [\"Reveal Militia\", \"Rotate Clashes\"]\n self.plr.play_card(card)\n self.assertIn(\"Gold\", self.plr.piles[Piles.HAND])\n next_card = self.g[\"Clashes\"].remove()\n self.assertEqual(next_card.name, \"Archer\")", "def dealer_turn(self):\n self.dealer.reveal()\n show_table_later(self.player, self.dealer, self.pot)\n while self.dealer.hand.value < 17:\n self.dealer.take_card(self.deck)\n show_table_later(self.player, self.dealer, self.pot)", "def test_overwriting_hidden_property_to_false(self):\n entity.Entity.overwrite_attribute(self.entity.entity_id,\n [ATTR_HIDDEN], [False])\n self.entity.hidden = True\n self.entity.update_ha_state()\n\n self.assertNotIn(\n ATTR_HIDDEN,\n self.hass.states.get(self.entity.entity_id).attributes)", "def testcheatFalse(self):\n import Cheat\n res = Cheat.cheatclass.cheatF(self)\n exp = Cheat.cheatclass.cheatingR(self)\n\n self.assertFalse(res, exp)", "def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()", "def hide(self, event=None):\n self.visible = 0\n self.withdraw()", "def _discardCallFrame( self , fName ) :\n if fName == self.debuggerFName or self.hide:\n self.hide = self.hide + 1\n if self.hide:\n return True\n return False", "def action_hit(self) -> None:\n print(self.deal_card(self.user))", "def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()", "def test_canceled_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 10) # Attack 10\n s1a = s1.react(self.bob, 8,\n troop_type=\"cavalry\") # --Attack 8 (12)\n s1a.react(self.alice, 6,\n troop_type=\"ranged\") # ----Attack 6 (9)\n s1.resolve()\n self.assertEqual(s1.victor, self.alice.team)\n self.assert_(s1.unopposed)\n\n # Should be 20 VP (double the 10 it'd ordinarily be worth)\n self.assertEqual(s1.vp, 20)", "async def test_get_actions_hidden_auxiliary(\n hass: HomeAssistant,\n device_registry: dr.DeviceRegistry,\n entity_registry: er.EntityRegistry,\n hidden_by,\n entity_category,\n) -> None:\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_hass(hass)\n device_entry = device_registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_entry = entity_registry.async_get_or_create(\n DOMAIN,\n \"test\",\n \"5678\",\n device_id=device_entry.id,\n entity_category=entity_category,\n hidden_by=hidden_by,\n supported_features=CoverEntityFeature.CLOSE,\n )\n expected_actions = []\n expected_actions += [\n {\n \"domain\": DOMAIN,\n \"type\": action,\n \"device_id\": device_entry.id,\n \"entity_id\": entity_entry.id,\n \"metadata\": {\"secondary\": True},\n }\n for action in [\"close\"]\n ]\n actions = await async_get_device_automations(\n hass, DeviceAutomationType.ACTION, device_entry.id\n )\n assert actions == unordered(expected_actions)", "def __play_delear(self, state : State):\n # print (\"Playing as dealer\")\n dealer_sum = state.max_safe_sum(dealer=True)\n 
assert (-1 <= dealer_sum <= 31)\n while (0 <= dealer_sum < 25):\n # Keep hitting\n card, suite = self.draw()\n state.update_state (card, suite, dealer=True)\n dealer_sum = state.max_safe_sum(dealer=True)\n assert (-1 <= dealer_sum <= 31)\n\n return dealer_sum", "def mock_card():\n return Card(Suit.SPADE, 1)", "def test_deal_insufficient_cards(self):\n cards = self.deck._deal(100)\n self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)", "def use_trait_card(self, card):\n # remove card from the list\n self.num_cards.set(self.num_cards.get()-1)\n self.card_list.remove(card)\n #temp = self.card_list.pop(self.card_list.index(card))\n #check if hand is now empty\n if self.num_cards.get() == 0:\n self.empty.set(True)\n self.toggle_empty_hand()\n card.pack_forget() \n card.change_hands() # forget that you belonged to this hand!\n self.player.make_payment(card)", "def hideIsoSurfaces(self):\n #research\n profprint()\n contourNode = slicer.util.getNode(self.contourNode)\n widget = slicer.modules.NeedleFinderWidget\n if contourNode != None:\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked()-1))\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked()-1))", "def is_visible(self):\n return self.real > 0", "def war_tie(cls, card1, card2):\n print(\"------------------------------------------------\")\n print(\"Tie!!\")\n print(f\"{card1.show()} is equal to {card2.show()}\")\n print(\"------------------------------------------------\")", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def test_DisplayReturnsNone(self):\r\n self.assertEqual(self.tv._display([]), None)", "def test_active_off(self):\n\n self.feature_test.set_percentage(0)\n self.assertFalse(self.feature_test.is_active)", "def test_should_render_with_discarded(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(\n status=ReviewRequest.DISCARDED,\n can_edit_reviewrequest=False)))", "def unHide(self):\n self.visible = True", "def IsShown(self):\r\n \r\n return not self.HasFlag(self.optionHidden)", "def hideIsoSurfaces(self):\r\n # research\r\n profprint()\r\n contourNode = slicer.util.getNode(self.contourNode)\r\n widget = slicer.modules.NeedleFinderWidget\r\n if contourNode != None:\r\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked() - 1))\r\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked() - 1))", "def test_focus_not_on_hidden(self):\n target = 'hide_field'\n field = self.form.fields.get(target, None)\n result_name = self.form.assign_focus_field(target)\n focused = self.find_focus_field()\n\n self.assertTrue(isinstance(getattr(field, 'widget', None), (HiddenInput, MultipleHiddenInput, )))\n self.assertIn(target, self.form.fields)\n self.assertEqual(1, len(focused))\n self.assertNotEqual(target, focused[0])\n self.assertNotEqual(target, result_name)", "def test_feeding_decrease(self):\n available_food = 100\n expected = 90\n nt.assert_equal(expected, self.herb.feeding(available_food))", "def visible(self, show):", "def discarded(self) -> bool:\n return (\n len(self.cards) == 13 - self.game.board.purple.space - self.discard_amount\n )", "def hide_fishfry():\n if request.method == 'POST' and request.args.get('ffid'):\n ffid = request.args.get('ffid')\n r = hide_one_fishfry(ffid)\n flash('You un-published 
a Fish Fry ({0})'.format(ffid), 'info')\n\n return redirect(url_for('load_fishfry', ffid=ffid))", "def testViewingDesc(self):\n\n self.assertTrue(\n hasattr(self.cd, 'viewing_desc')\n )\n\n self.assertEqual(\n None,\n self.cc.viewing_desc\n )" ]
[ "0.6782351", "0.67602885", "0.6536577", "0.639235", "0.63622284", "0.6298589", "0.62585497", "0.6215377", "0.6096858", "0.60356593", "0.603329", "0.6007592", "0.592438", "0.5918852", "0.5918852", "0.58873326", "0.5886941", "0.5881228", "0.587296", "0.57984495", "0.5798309", "0.5730347", "0.5722778", "0.57071656", "0.5686281", "0.5679269", "0.56646335", "0.56542075", "0.56288713", "0.55735666", "0.55559516", "0.55555916", "0.55489385", "0.5546263", "0.5534616", "0.55339086", "0.55069435", "0.55018455", "0.5498984", "0.549376", "0.5477975", "0.54696214", "0.546747", "0.54633963", "0.5462517", "0.54590106", "0.544593", "0.544593", "0.544326", "0.54390013", "0.5435438", "0.5434849", "0.54309464", "0.542888", "0.5421941", "0.5418339", "0.54040504", "0.53924406", "0.5391809", "0.5375725", "0.5367523", "0.53653896", "0.53653896", "0.5359049", "0.5359049", "0.53504384", "0.5349684", "0.5346633", "0.5334513", "0.5319317", "0.5314575", "0.5310231", "0.5309955", "0.529954", "0.52980685", "0.5293111", "0.52925384", "0.52886987", "0.52854806", "0.5283367", "0.5278324", "0.52768934", "0.52746344", "0.5263005", "0.52594274", "0.52577966", "0.52506435", "0.52465355", "0.5246481", "0.5239757", "0.52386886", "0.5222271", "0.52218235", "0.52159727", "0.5215928", "0.51979446", "0.5197361", "0.5183794", "0.5181153", "0.51805854" ]
0.6018047
11
Returns False for out of range inputs.
def test_invalid_inputs(self):
    f = gtrutils.check_petition_combos

    self.assertFalse( f(-1, 1, [], False, False))
    self.assertFalse( f( 0, 1, [], False, False))
    self.assertFalse( f( 1, 0, [], False, False))
    self.assertFalse( f( 1, 1, [-1], False, False))
    self.assertFalse( f( 1,-1, [], False, False))

    self.assertFalse( f( 1, 1, [1], False, False)) # n_off_role can never be 1
    self.assertFalse( f( 1, 1, [1], True, False)) # n_off_role can never be 1
    self.assertFalse( f( 1, 1, [1], False, True)) # n_off_role can never be 1
    self.assertFalse( f( 1, 1, [1], True, True)) # n_off_role can never be 1
    self.assertFalse( f( 1, 1, [1,3], True, True)) # n_off_role can never be 1

    self.assertFalse( f( 3, 0, [2,3,3], False, True)) # n_off_role can never be 1
    self.assertFalse( f( 3, 0, [2,3,3], True, False)) # n_off_role can never be 1
    self.assertFalse( f( 2, 0, [2,3,3], False, True)) # n_off_role can never be 1
    self.assertFalse( f( 2, 0, [2,3,3], True, False)) # n_off_role can never be 1

    self.assertFalse( f( 5, 1, [6,6], True, False)) # n_off_role can never be 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isRangeValid(self) -> bool:\n ...", "def acceptsArgument(self):\n range = self.validateRange(self.range)\n return not(not(range[1]))", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])", "def check_bounds(self, index):\n if index < self.lower_bound or index > self.upper_bound:\n return False\n return True", "def __verify_range(value, minimum, maximum):\n if value in range(minimum, maximum):\n return True\n else:\n return False", "def __check_args_val(self):\n if self.__min_range < 0:\n error_msg = \"min_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < 0:\n error_msg = \"max_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < self.__min_range:\n error_msg = \"max_range must be greater than or equal to min_range\"\n raise ValueError(error_msg)", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def requiresArgument(self):\n range = self.validateRange(self.range)\n return not(not(range[0]))", "def out_of_range_check(self, guess, range):\r\n if ((guess<0) or (guess>=range)):\r\n return \"Input is out of range!\"\r\n else:\r\n return guess", "def validate(self, value: Any, low: int, high: int) -> bool:\n pass", "def _is_in_range(valid_values):\n\n def f(x):\n if x not in valid_values:\n raise ValueError('{} not in {}'.format(x, valid_values))", "def is_valid_value(self, value):\n if not self.range:\n return False\n\n return value >= self.range[0] and value <= self.range[1]", "def __check_range(self, movement: int) -> bool:\n next_position = self.__find_next_position_in_degrees(movement)\n if next_position < self.lowest_element.position_in_degrees:\n return False\n if next_position > self.highest_element.position_in_degrees:\n return False\n return True", "def test_inrange():\n assert cs.any > 0\n assert cs.any < cmax", "def test_args_count_in_range(args: list, min: int, max: int) -> bool:\n\n\tcount = args_count(args)\n\treturn (count >= min and count <= max)", "def validate_position(position: Tuple[int, int], bound: int) -> bool:\n if position[0] < 0 or position[0] >= bound:\n return False\n if position[1] < 0 or position[1] >= bound:\n return False\n return True", "def __is_valid(self, subscript):\n return ((0,0) <= subscript and subscript < self.size)", "def out_of_bounds(self):\n return self.rect.right <= 0", "def _is_out_of_range(self, signal, y_range, threshold):\n out_of_range = [s for s in signal if s < y_range.min or s > y_range.max]\n out_of_range_percentage = len(out_of_range) / len(signal)\n\n return out_of_range_percentage > threshold", "def f_has_range(self):\n return len(self._explored_range) > 0", "def check_range_value(array, min_=None, max_=None):\n # check lowest and highest bounds\n if min_ is not None and array.min() < min_:\n raise ValueError(\"The array should have a lower bound of {0}, but its \"\n \"minimum value is {1}.\".format(min_, array.min()))\n if max_ is not None and array.max() > max_:\n raise ValueError(\"The array should have an upper bound of {0}, but \"\n \"its maximum value is {1}.\".format(max_, array.max()))\n\n return True", "def is_valid(self, value: int) -> bool:\n return value < self.min_value or value > self.max_value", "def is_in_range(value: float, lower_bound: float, upper_bound: float, 
err_string: str) -> None:\n if value < lower_bound or value > upper_bound:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def isavalidinput(self , x , u):\n ans = False\n for i in range(self.m):\n ans = ans or ( u[i] < self.u_lb[i] )\n ans = ans or ( u[i] > self.u_ub[i] )\n \n return not(ans)", "def check_binning_parameter_range(x_min, x_max, ws_unit):\n if ws_unit == 'dSpacing' and not 0 < x_min < x_max < 20:\n # dspacing within (0, 20)\n x_range_is_wrong = True\n elif ws_unit == 'TOF' and not 1000 < x_min < x_max < 1000000:\n # TOF within (1000, 1000000)\n x_range_is_wrong = True\n elif ws_unit != 'dSpacing' and ws_unit != 'TOF':\n raise NotImplementedError('Impossible case for unit {}'.format(ws_unit))\n else:\n # good cases\n x_range_is_wrong = False\n\n if x_range_is_wrong:\n ero_msg = 'For {0}, X range ({1}, {2}) does not make sense' \\\n ''.format(ws_unit, x_min, x_max)\n print('[ERROR CAUSING CRASH] {}'.format(ero_msg))\n raise RuntimeError(ero_msg)\n\n return", "def is_valid(self, value: Union[float, int]) -> bool:\n if self.min is not None:\n if self.include_min:\n if value < self.min:\n return False\n else:\n if value <= self.min:\n return False\n\n if self.max is not None:\n if self.include_max:\n if value > self.max:\n return False\n else:\n if value >= self.max:\n return False\n\n if self.step is None:\n return True\n\n if self.min is not None:\n value -= self.min\n return (value % self.step) == 0", "def _isvalid(self, x):\n return (x <= self.n) & (x > 0)", "def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def in_range(table, index):\n if index > len(table):\n print(\"Error: index out of range\")\n return False\n if index < 0:\n print(\"Error: negative index\")\n return False\n return True", "def _isInAllowedRange( self, testval, refval, reltol=1.e-2 ):\n denom = refval\n if refval == 0:\n if testval == 0:\n return True\n else:\n denom = testval\n rdiff = (testval-refval)/denom\n del denom,testval,refval\n return (abs(rdiff) <= reltol)", "def is_valid_range(parser, arg, minimum=0, maximum=100):\n if arg < minimum:\n parser.error(\"%s < %s\", arg, minimum)\n else:\n if arg > maximum:\n parser.error(\"%s > %s\", arg, maximum)\n\n return arg", "def isUndefinedRange(program: ghidra.program.model.listing.Program, startAddress: ghidra.program.model.address.Address, endAddress: ghidra.program.model.address.Address) -> bool:\n ...", "def _validate_value(self, value):\n if self.limits[0] <= value <= self.limits[1]:\n return True\n else:\n return False", "def check_out_range(value, lim_1, lim_2):\n lo_lim = min(lim_1, lim_2)\n hi_lim = max(lim_1, lim_2)\n \n if (abs(value) > abs(hi_lim)) or (abs(value) < abs(lo_lim)):\n return True\n else:\n return False", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def check_bounds (position, size):\n \n for 
item in position:\n # checks whether item is out of bounds\n if item < 0 or item >= size:\n return False\n return True", "def out_of_bounds(self):\n return self.rect.right <= 0 or self.rect.left >= self.screen_rect.width", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val", "def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. Range does not appear to be an int\")\n return False\n return True", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def is_out_of_bounds(self, position_to, position_from):\n\n board_columns = self.get_board_columns()\n board_rows = self.get_board_rows()\n\n # Need column/row ordering to simplify code.\n if position_to[0] not in board_columns or\\\n position_to[1:] not in board_rows:\n\n return True\n\n elif position_from[0] not in board_columns or\\\n position_from[1:] not in board_rows:\n\n return True\n\n return False", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def boundary(quantity, lower, upper):\r\n in_range = False\r\n while not in_range:\r\n if quantity < lower or quantity > upper:\r\n quantity = int(input(\"That is out of range, please try a number between \" + \\\r\n str(lower) + \" and \" + str(upper) + \": \"))\r\n else:\r\n in_range = True\r\n return quantity", "def __is_valid(self, pos):\n return 0 <= pos[0] < self._n and 0 <= pos[1] < self._n", "def inrange ( a , x , b ) :\n _a = float(a)\n _b = float(b)\n _x = float(x)\n return ( _a <= _x or isequal ( _a , _x ) ) and ( _x <= _b or isequal ( _x , _b ) )", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def valid(self, pos):\n\t\tpos = Point(pos)\n\t\treturn 0 <= pos.x < self.dims.width and 0 <= pos.y < self.dims.height", "def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)", "def out_of_bounds(self):\n return self._parms.get(\"out_of_bounds\")", "def __call__(self, value: np.ndarray) -> bool:\n for k, bound in enumerate(self.bounds):\n if bound is not None:\n if np.any((value > bound) if k else (value < bound)):\n return False\n return True", "def check_ranges(ranges, value):\n for fromto in ranges:\n start, end = fromto.split('-')\n if int(value) in range(int(start), int(end) + 1):\n return True\n # else:\n # print('%s is not between %s and %s' % (value, start, end))\n return False", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def check_value(self, 
pos):\n if self.limits is not None:\n low, high = self.limits\n if low != high and not (low <= pos <= high):\n raise ValueError(\"{} outside of user-specified limits\" \"\".format(pos))\n else:\n self.setpoint.check_value(pos)", "def in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def __verify_index(self, index):\n if not isinstance(index, int):\n raise TypeError(\"Index must be of type int\")\n elif index >= self.length or index < -self.length:\n raise IndexError(\"Index out of bounds\")\n return True", "def is_valid_number(self):\n for condition in [self.game.getRow(self.pos), self.game.getCol(self.pos), self.game.getSquare(self.pos)]:\n if not self.check_alignement_condition(condition):\n return False\n return True", "def _in_range_op(spec):", "def __check_args_type(self):\n if not isinstance(self.__min_range, (float, int)):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif not isinstance(self.__max_range, (float, int)):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)\n\n if isinstance(self.__min_range, bool):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif isinstance(self.__max_range, bool):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)", "def _check_joint_limits(self, abs_input: [list, np.ndarray]):\n assert len(abs_input) == self.DoF, \"The number of joints should match the arm DoF.\"\n if not np.all(np.bitwise_and(abs_input >= self.limits['lower'][:self.DoF],\n abs_input <= self.limits['upper'][:self.DoF])):\n print(\"Joint position out of valid range!\")\n print(\"Set joint:\", abs_input)\n return False\n return True", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1", "def _check_range(r): \n if _is_single_range(r):\n _check_one_range(r)\n elif isinstance(r, collections.Sequence):\n for r2 in r:\n _check_one_range(r2)\n else:\n raise error.RangeSyntaxError(str(r))", "def is_in_range(self, price):\r\n return price <= self.pmax and price >= self.pmin", "def in_pixel_range(self, pixmin: int, pixmax: int) -> bool:\n \n if any(i < pixmin or i > pixmax or np.isnan(i) for i in self.datapos):\n return False\n\n return True", "def in_valid_range(self, string):\n fret_number = self.cursor.get_frets()[string]\n return (\n (self.min_x <= fret_number <= self.max_x) or\n (self.allow_open and fret_number == self.guitar.min_fret)\n )", "def check_input(nodes, num_edges):\n num_nodes = len(nodes)\n min_edges = num_nodes - 1\n if num_edges < min_edges:\n raise ValueError('num_edges less than minimum (%i)' % min_edges)\n max_edges = num_nodes * (num_nodes - 1)\n if num_edges > max_edges:\n raise ValueError('num_edges greater than maximum (%i)' % max_edges)", "def isInputValid(self, input):\r\n pass", "def isInRange(val, minv, maxv):\n\treturn val >= minv and val <= maxv", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). 
Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def _is_range_boundary(boundary):\n return (isinstance(boundary, numbers.Integral) or\n (_is_string(boundary) and (boundary.lower() in ('min','max'))))", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def range_between_0_and_9(self, user_num):\r\n if 0 <= user_num < 9:\r\n return True\r\n else:\r\n return False", "def check_input(min_guess_range, max_guess_range):\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess", "def _inrange(self, index):\n if len(index) != self.ndim:\n raise Exception('SparseN tensor has %d dimensions, and requires the same number of indices.'%self.ndim)\n for ii, ss in zip(index,self.shape):\n if ii < 0 or ii >= ss:\n raise Exception('Index is out of range: %d'%index)", "def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def in_bounds(self, location: tuple) -> bool:\n return 0 <= min(location) and max(location) <= 7", "def test_if_input_is_negative(self):\n self.assertEquals(prime_numbers(-5), \"Numbers less than or equal to zero are not allowed!\")", "def bounds_check(session):\n\n max_ = session.field.opts.max\n min_ = session.field.opts.min\n\n if max_ is not None and len(session.data) > max_:\n raise session.field.invalid(error_type='out_of_bounds')\n if min_ is not None and len(session.data) < min_:\n raise session.field.invalid(error_type='out_of_bounds')\n\n return session.data", "def isinf(x):\n return False", "def validate_points(self, data):\n if data> 1:\n data = 1\n elif data < 0:\n data=0\n return data", "def __bool__(self):\n return self.end < len(self.data)", "def _is_range(cls, rng):\n match = re.search(\"([0-9][1-9]*)-([0-9][1-9]*)\", rng)\n # Group is a singular value.\n return match is not None", "def test_creation_bounds_not_inclusive():\n with pytest.raises(ValueError) as __:\n value = -42\n __ = param.Integer(value=value, hardbounds=[-42, 100], inclusive_bounds=[False, False])", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X >= 0).all() & isinteger(\n X\n ), \"x should be greater or equal to 0 and integer.\"", "def is_valid(self, move):\r\n return move > 10 and move < 89", "def is_valid_pos(self, pos_step):\n return not (self.pos.x % pos_step or self.pos.y % 
pos_step)", "def test_threshold_range_a(self):\n code, out, err = self.t.runError(\"--threshold --max 3.1 --min 3.2\")\n self.assertIn(\"The min value must be lower than the max value.\", out)", "def valid_position(self, new_coords: tuple) -> bool:\n x, y = new_coords\n min_allowed_value = self.offset\n max_allowed_value = 10 - self.offset\n\n # If the value is outside of the board on the left or up, return false\n if x < min_allowed_value or y < min_allowed_value:\n return False\n # If the value is outside of the board on the right or down sides, return false\n if x > max_allowed_value or y > max_allowed_value:\n return False\n\n # If the position is taken by any piece, return false\n if self.board[y][x] != 0:\n print(\"Error: Position taken by %d\" % self.board[y][x])\n return False\n return True", "def test_return_false_if_input_less_than_zero(self):\n \n self.assertEqual(vowel_check.is_vowel(-1), False)\n self.assertEqual(vowel_check.is_vowel(0.9), False)", "def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True", "def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True", "def validate_query_range(query_range: str) -> (bool, str):\n try:\n start, end = query_range.split(\"-\")\n start = int(start.strip())\n end = int(end.strip())\n if start < 1:\n return False, \"Start of range should be >= 1\"\n if end < 1:\n return False, \"End of range should be >= 1\"\n\n return True, f\"{start-1}-{end-1}\" # subtract by one as index always start with 0\n\n except ValueError as err:\n return False, err", "def in_range(low, high, step=None):\n def check(value):\n if not low <= value < high:\n return False\n\n if step is not None:\n return (value - low) % step == 0\n return True\n\n return check", "def main():\n inputx = int(input())\n inputy = int(input())\n inputa = input()\n if inputa == \"True\":\n if (inputx < 0 and inputy >= 0) or (inputy < 0 and inputx >= 0):\n print(\"True\")\n else:\n print(\"False\")\n else:\n if (inputx >= 0 and inputy >= 0) or (inputx < 0 and inputy < 0):\n print(\"True\")\n else:\n print(\"False\")", "def test_viable(self,outs):\n \n viable = True\n for i,temp_i in enumerate(outs):\n if (temp_i <= self.mins[i+4]):\n viable = False\n elif (temp_i >= self.maxes[i+4]): \n viable = False\n return viable", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def check_overflow(self):\n self.stateC = self.toConceptual(self.state)\n\n check_inf = torch.any(torch.isinf(self.stateC)) or torch.any(\n torch.isinf(self.state))\n check_nan = torch.any(torch.isnan(self.stateC)) or torch.any(\n torch.isnan(self.state))\n\n if check_inf or check_nan:\n return True\n else:\n return False" ]
[ "0.76130134", "0.74046963", "0.73377717", "0.72116476", "0.7005175", "0.69723386", "0.68953574", "0.6872818", "0.68704146", "0.6861065", "0.68098015", "0.66929597", "0.66748506", "0.6673904", "0.66729593", "0.66561925", "0.66364926", "0.6613519", "0.66080934", "0.6600456", "0.65958244", "0.65814877", "0.65548646", "0.6545156", "0.65307695", "0.65216273", "0.6521574", "0.65148526", "0.64945096", "0.6477773", "0.64317775", "0.642873", "0.64280516", "0.6421419", "0.6421332", "0.63838685", "0.6378771", "0.63742536", "0.6373527", "0.63639206", "0.63559777", "0.6343801", "0.6308636", "0.6301771", "0.6285708", "0.6258776", "0.62421006", "0.62350875", "0.62340033", "0.623053", "0.62232774", "0.6219666", "0.6216784", "0.6212728", "0.62068313", "0.61893594", "0.61755866", "0.61662924", "0.61657214", "0.6164114", "0.61584526", "0.61503315", "0.61502635", "0.6146004", "0.61431605", "0.614155", "0.6133837", "0.61326134", "0.6130812", "0.61286336", "0.61231077", "0.6119842", "0.61176777", "0.60907966", "0.6078766", "0.6066319", "0.6058402", "0.6055483", "0.60381216", "0.60374486", "0.6037379", "0.60368896", "0.6033431", "0.60322684", "0.6029419", "0.602841", "0.6023649", "0.6019699", "0.601914", "0.6013514", "0.6007497", "0.60003257", "0.5999294", "0.5993739", "0.599361", "0.59927005", "0.5988561", "0.59857434", "0.5984851", "0.5982192", "0.59773237" ]
0.0
-1
Test with no petitions allowed.
def test_no_petitions(self):
    f = gtrutils.check_petition_combos

    self.assertTrue( f( 0, 0, [ 0], False, False))

    self.assertFalse( f( 1, 0, [], False, False))
    self.assertFalse( f( 1, 1, [2], False, False))
    self.assertFalse( f( 1, 1, [3], False, False))
    self.assertFalse( f( 1, 1, [4], False, False))
    self.assertTrue( f( 1, 1, [], False, False))
    self.assertFalse( f( 1, 2, [], False, False))
    self.assertFalse( f( 1, 3, [], False, False))

    self.assertFalse( f( 2, 1, [], False, False))
    self.assertTrue( f( 2, 2, [], False, False))
    self.assertFalse( f( 2, 3, [], False, False))

    self.assertFalse( f( 3, 1, [], False, False))
    self.assertFalse( f( 3, 2, [], False, False))
    self.assertTrue( f( 3, 3, [], False, False))

    self.assertTrue( f(13,13, [], False, False))

    self.assertFalse( f( 1, 1, [0,0,0,3], False, False))
    self.assertFalse( f( 2, 1, [0,0,0,3], False, False))
    self.assertFalse( f( 3, 1, [0,0,0,3], False, False))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])", "def not_test_without_user(self):\n # TODO", "def test_tally_no_candidates(self):\n self.init_elect_types()\n\n userA = models.User(\n name = \"UserA\",\n email = \"userA@eLect.com\",\n password = \"asdf\")\n\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id)\n\n session.add(electionA)\n session.commit()\n\n raceA = models.Race(\n title = \"Race A\",\n election_id = electionA.id\n )\n\n session.add(raceA)\n session.commit()\n\n with self.assertRaises(NoCandidates):\n self.wta.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.proportional.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.schulze.check_race(raceA.id)", "def noCondition(self):\n result = Activatable(self.effects).canActivate(self.game)\n self.assertTrue(result, \"The Activatable should be activatable\")", "def violated(self) -> bool:\n ...", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_disallow_absent_fighting(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.region = londo\n self.sess.commit()\n\n with self.assertRaises(db.NotPresentException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_reject_proposal_demand(self):\n pass", "def test_wip(self):\n self.assertTrue(not return_true())", "def test_empty_functions():", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_partial_deck_doesnt_have_ignored_cards(self):\n self.assertEqual(self.ignoredCardPresent, False)", "def test_tally_no_votes(self):\n self.populate_database()\n self.electionA.elect_open = False\n with self.assertRaises(NoVotes):\n self.wta.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.proportional.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.schulze.check_race(self.raceA.id)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_check_inputs_resident_prefs_all_nonempty(game):\n\n resident = game.residents[0]\n resident.prefs = []\n\n with pytest.warns(PlayerExcludedWarning) as record:\n game._check_inputs_player_prefs_nonempty(\"residents\", \"hospitals\")\n\n assert len(record) == 1\n assert resident.name in str(record[0].message)\n\n if game.clean:\n assert resident not in game.residents", "def prove_NO() -> Proof:\n # Optional Task 6.9c", "def test_no_overprovision(self):\n command_line = (\n self._MENU + [self._POOLNAME] + self._DEVICES + [\"--no-overprovision\"]\n )\n TEST_RUNNER(command_line)", "def test_nothing(self):", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def 
test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def is_ignored(self):", "def allow(self, test):\n raise NotImplementedError()", "def test_falsepositive(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/falsepositive\" + get_request_args)\n assert \"Thanks! We’ve marked this as a false positive\" in res.data.decode(\"utf-8\")", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_ignore_edition(self):\n self.assertFalse(ignore_edition({\"isbn_13\": \"hi\"}))\n self.assertFalse(ignore_edition({\"oclc_numbers\": \"hi\"}))\n self.assertFalse(ignore_edition({\"covers\": \"hi\"}))\n self.assertFalse(ignore_edition({\"languages\": \"languages/fr\"}))\n self.assertTrue(ignore_edition({\"languages\": \"languages/eng\"}))\n self.assertTrue(ignore_edition({\"format\": \"paperback\"}))", "def test_find_with_no_pets(self):\n pet = Pet.find(1)\n self.assertIs(pet, None)", "def cant(user, action):\n\n return not can(user, action)", "def test_nomatch_for(fake_good_request, bad_for_predicate):\n kwargs = dict(for_=FakeGoodView)\n predicate_values = [bad_for_predicate]\n result = predicates_match(fake_good_request, predicate_values, **kwargs)\n assert not result", "def not_met(predicate, request):\n return not predicate(request)", "def noyable(self):\n return False", "def test_no_games(self):\n sgf = \"\"\n try:\n coll = parseSgf(sgf)\n except SgfParseError, e:\n if str(e) == \"Empty collection.\":\n return\n self.assertFalse(1)", "def test_negatives(self):\n model = PoincareModel(self.data, negative=5)\n self.assertEqual(len(model._get_candidate_negatives()), 5)", "def test_nonsubscriber(self) -> None:\n # Create a stream for which Hamlet is the only subscriber.\n stream_name = \"Saxony\"\n self.common_subscribe_to_streams(self.user_profile, [stream_name])\n other_user = self.example_user(\"othello\")\n\n # Fetch the subscriber list as a non-member.\n self.login_user(other_user)\n self.make_successful_subscriber_request(stream_name)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_user_is_none(self):\n self.assertFalse(send_rotate_to_can(None, self.BIN_NUM))", "def test_check_inputs_hospital_prefs_all_nonempty(game):\n\n hospital = game.hospitals[0]\n hospital.prefs = []\n\n with pytest.warns(PlayerExcludedWarning) as record:\n game._check_inputs_player_prefs_nonempty(\"hospitals\", \"residents\")\n\n assert len(record) == 1\n assert hospital.name in str(record[0].message)\n\n if game.clean:\n assert hospital not in game.hospitals", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_non_trialing(self):\n account = AccountFactory(status=Account.AccountStatus.ACTIVE)\n request = self.rf.get(\"/\")\n request.account = account\n context = {\"request\": request}\n\n context = accounts_tags.trial_banner(context)\n\n assert not context[\"display_banner\"]", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def is_emptiable(self) -> bool:\n raise NotImplementedError()", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def test_unpopular(self):\n self.assertFalse(self.user3.is_popular())\n self.user3.receive_upvotes(randint(101, 10000))\n self.assertTrue(self.user3.is_popular())", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def test_is_active_without_ops(self):\n\n self.veh.health = 2\n self.veh.operators = []\n self.assertFalse(self.veh.is_active)", "def test_noTicket():\n assert testUser1.buyTicket(None) == False", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def test_disallow_extract(self):\n self.battle.create_skirmish(self.alice, 1)\n cap = self.get_region(\"Oraistedarg\")\n self.assertNotEqual(self.alice.region, cap)\n\n with self.assertRaises(db.InProgressException):\n self.alice.extract()\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 0)\n self.assertNotEqual(self.alice.region, cap)", "def test_published_story_must_be_visible_for_everyone_but_blocked(self):\n self.assertEqual(self.ps.is_visible_for(self.au), True)\n\n \"\"\" Published story must be visible for another.\"\"\"\n 
self.assertEqual(self.ps.is_visible_for(self.u2), True)\n\n \"\"\" Publsihed story must be visible for owner. \"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def valid_prohibited_none_role(arch, **kwargs):\n xpath = '//*[@role=\"none\" or @role=\"presentation\"]'\n if arch.xpath(xpath):\n return \"Warning\"\n return True", "def test_no_overkill(self):\n s1 = self.battle.create_skirmish(self.alice, 10) # Attack 10\n s1.react(self.carol, 10, hinder=False) # Right amount = ok\n\n with self.assertRaises(db.TooManyException):\n s1.react(self.bob, 11)", "def test_nonVisibilityUnaffected(self):\n self.assertEquals(\n list(self.observer.idea.obtain(\n idea.Proximity(3, idea.ProviderOf(iimaginary.IThing)))),\n [self.observer, self.location, self.rock]\n )", "def test_contains_false(self):\n self.assertFalse('Not_a_Category' in self.tester)", "def test_contains_false(self):\n self.assertFalse('Not_a_Category' in self.tester)", "def test_noop():\n assert True", "def testInvalidDescriptions(self):\n self.assertFalse(self.app._ignore_jobs(\"telecommuting is not an option\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommuting\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommute\"))\n self.assertFalse(self.app._ignore_jobs(\"TELECOMMUTE IS NOT AN OPTION\"))", "def test_noFailure(self):\n for i in range(10):\n self.assertTrue(self.circuit_breaker.available())", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_tally_no_races(self):\n self.init_elect_types()\n\n userA = models.User(\n name = \"UserA\",\n email = \"userA@eLect.com\",\n password = \"asdf\")\n\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id)\n\n session.add(electionA)\n session.commit()\n\n with self.assertRaises(NoRaces):\n self.wta.check_race(1)\n\n with self.assertRaises(NoRaces):\n self.proportional.check_race(1)\n\n with self.assertRaises(NoRaces):\n self.schulze.check_race(1)", "def test_not_notified(scraper):\n\n assert scraper.scrape_profile('https://www.punters.com.au/') is None", "def is_acceptable(self):", "def testHasNoInvites(self):\r\n u = User()\r\n u.invite_ct = 0\r\n self.assertFalse(u.has_invites(), 'User should have no invites')\r\n self.assertFalse(\r\n u.invite('me@you.com'), 'Should not be able to invite a user')", "def test_disallow_betrayal(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.bob, 1, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_skill_request(self):\n actions.login(ADMIN_EMAIL)\n\n response = self.get(self.URL)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])", "def test_disallow_retreat(self):\n self.battle.create_skirmish(self.alice, 1)\n londo = self.get_region(\"Orange Londo\")\n\n with self.assertRaises(db.InProgressException):\n self.alice.move(100, londo, 0)\n\n n = (self.sess.query(db.MarchingOrder).\n 
filter_by(leader=self.alice)).count()\n self.assertEqual(n, 0)", "def test_no_permissions(self):\n\n login(self.client)\n\n client = create_client('test')\n client.write_access = False\n client.save()\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': 3})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def is_inequality(self): \n return False", "def isLegal(self):\n counter = 0\n for t in self.types:\n if t > 0:\n counter = counter + 1\n if counter < 4:\n return True\n else:\n return False", "def test_return_none(self):\n user = create_user('passwordmichu', 'michu')\n coll = create_collection('Coleccion de cartas')\n problem = create_select_problem(coll, 'Problema')\n self.assertIsNone(problem.solved_position(user))", "def test_feature_disabled(self, url):\n response = self.client.get(url)\n assert response.status_code == 403\n response = self.client.post(url)\n assert response.status_code == 403", "def test_contains_false(self):\n self.assertFalse('Not_a_Sample' in self.tester)", "def test_contains_false(self):\n self.assertFalse('Not_a_Sample' in self.tester)", "def test_simple_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n s1.resolve()\n self.assert_(s1.unopposed)\n\n # Should be worth 2 VP\n self.assertEqual(s1.vp, 2)", "def test_requires_privilege_no_such(self):\n @requires_privilege('bomboozle', domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n request = HttpRequest()\n request.role = requestor_role\n with self.assertRaises(PermissionDenied):\n view(request)", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def primers_are_useless(self):\r\n #TODO: send a message telling these primers can be taken out.\r\n for feature in self.gt_seq_region:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.pcr_product:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.forward_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.reverse_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")", "def test_non_thesis(non_thesis):\n assert non_thesis is None", "def test_get_users_eligible_for_fist_notification_with_no_result(self):\n # Given:\n self.batch_setup()\n # When:\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(0, len(users))\n self.assertNotIn(self.user_0, users)\n self.assertNotIn(self.user_2, users)\n self.assertNotIn(self.user_1, users)\n self.assertNotIn(self.user_3, users)", "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n 
pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def test_OneOfEverything(self):\n self._run(self._test_scenarios, \"OneOfEverything\")", "def test_skipif_false():\n pass", "def test_vote_when_none_choice_was_selected(self):\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:vote', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, question_no_choices)\n self.assertTrue('error_message' in response.context)\n self.assertEqual(response.context['error_message'], \"You didn't select a choice.\")", "async def should_handle(self):\n return self.main.base_amount > 4 and self.main.can_build_unique(UnitTypeId.INFESTATIONPIT, self.main.pits)", "def testcheatFalse(self):\n import Cheat\n res = Cheat.cheatclass.cheatF(self)\n exp = Cheat.cheatclass.cheatingR(self)\n\n self.assertFalse(res, exp)", "def test_noticedDoesntPrivmsg(self):\n\n def privmsg(user, channel, message):\n self.fail(\"privmsg() should not have been called\")\n\n self.protocol.privmsg = privmsg\n self.protocol.irc_NOTICE(\"spam\", [\"#greasyspooncafe\", \"I don't want any spam!\"])", "def test_forbidden_non_taggers(self):\n phenotype_taggers = Group.objects.get(name='phenotype_taggers')\n self.user.groups.remove(phenotype_taggers)\n response = self.client.get(self.get_url(self.trait.pk))\n self.assertEqual(response.status_code, 403)", "def test_post_options_unauthorized(self):\n url = reverse('post-list')\n response = self.client.options(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('Post List', response.content)", "def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))", "def test_visibility_of_not_available_4(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertContains(response, partner.get_absolute_url())", "def is_unrestricted(self):\n raise exceptions.NotImplementedError()", "def test_no_op(self):\n request = RequestFactory().get('/?tags=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertNotIn('tags__slug__in', filter.qs.filters)" ]
[ "0.64601696", "0.63709843", "0.62266237", "0.6206515", "0.5949884", "0.5946186", "0.5936945", "0.5928463", "0.59168476", "0.5914561", "0.59085196", "0.58991575", "0.58986336", "0.5888519", "0.5865907", "0.58641195", "0.58631915", "0.58543706", "0.5849753", "0.58493304", "0.5841391", "0.5839636", "0.58384335", "0.58377755", "0.58375996", "0.58311856", "0.5820184", "0.58062094", "0.580405", "0.58037144", "0.579636", "0.5793583", "0.5783966", "0.57804245", "0.57633185", "0.5754655", "0.5739635", "0.57319546", "0.57301426", "0.57297385", "0.57275826", "0.5711269", "0.57076925", "0.57067925", "0.56960267", "0.56952095", "0.5695016", "0.5686726", "0.5685698", "0.5679766", "0.56776005", "0.56706333", "0.56700283", "0.56671715", "0.56569296", "0.565684", "0.5654898", "0.56495965", "0.56474316", "0.56474316", "0.564498", "0.56328285", "0.5631361", "0.5630403", "0.5630128", "0.5618503", "0.5614398", "0.5605015", "0.56007034", "0.5600086", "0.559753", "0.559401", "0.5590738", "0.5588433", "0.55860245", "0.55834454", "0.5581573", "0.55809987", "0.55809987", "0.5572045", "0.5570174", "0.55696875", "0.55693567", "0.55592567", "0.5558617", "0.5556467", "0.5554746", "0.55545837", "0.5554384", "0.55528116", "0.55509186", "0.5550118", "0.5548073", "0.55478907", "0.55445147", "0.5538494", "0.5536799", "0.5532709", "0.5526135", "0.5518042" ]
0.551846
99
Test with only three-card petitions allowed.
def test_only_three_card_petitions(self):
    f = gtrutils.check_petition_combos

    self.assertTrue( f( 0, 0, [0], False, True))

    self.assertFalse( f( 1, 0, [0], False, True))
    self.assertTrue( f( 1, 1, [0], False, True))
    self.assertTrue( f( 1, 0, [3], False, True))
    self.assertTrue( f( 1, 3, [0], False, True))
    self.assertFalse( f( 1, 1, [2], False, True))
    self.assertFalse( f( 1, 1, [3], False, True))
    self.assertFalse( f( 1, 1, [4], False, True))

    self.assertTrue( f( 2, 2, [0], False, True))
    self.assertTrue( f( 2, 1, [3], False, True))
    self.assertTrue( f( 2, 3, [3], False, True))
    self.assertTrue( f( 2, 6, [0], False, True))
    self.assertTrue( f( 2, 0, [6], False, True))
    self.assertFalse( f( 2, 4, [3], False, True))

    self.assertFalse( f( 3, 1, [], False, True))
    self.assertFalse( f( 3, 2, [], False, True))
    self.assertFalse( f( 3, 0, [3], False, True))
    self.assertFalse( f( 3, 0, [6], False, True))
    self.assertTrue( f( 3, 3, [], False, True))
    self.assertTrue( f( 3, 2, [3], False, True))
    self.assertTrue( f( 3, 3, [6], False, True))
    self.assertTrue( f( 3, 1, [6], False, True))
    self.assertTrue( f( 3, 0, [9], False, True))

    self.assertTrue( f(13,13, [], False, True))
    self.assertTrue( f(13,39, [], False, True))
    self.assertTrue( f(13, 0, [39], False, True))
    self.assertTrue( f(13,15, [24], False, True))
    self.assertTrue( f(13,15, [], False, True))
    self.assertTrue( f(13,12, [3], False, True))
    self.assertFalse( f(13,14, [], False, True))

    self.assertFalse( f( 6, 1, [3,6,9], False, True))
    self.assertTrue( f( 7, 1, [3,6,9], False, True))
    self.assertFalse( f( 8, 1, [3,6,9], False, True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))", "def test_only_two_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], True, False))\n\n self.assertFalse( f( 1, 0, [], True, False))\n self.assertFalse( f( 1, 0, [1], True, False))\n self.assertTrue( f( 1, 0, [2], True, False))\n self.assertFalse( f( 1, 0, [3], True, False))\n self.assertFalse( f( 1, 0, [4], True, False))\n\n self.assertTrue( f( 1, 1, [], True, False))\n self.assertFalse( f( 1, 1, [2], True, False))\n\n self.assertFalse( f( 2, 0, [2], True, False))\n self.assertFalse( f( 2, 0, [3], True, False))\n self.assertTrue( f( 2, 0, [4], True, False))\n self.assertFalse( f( 2, 0, [5], True, False))\n \n self.assertTrue( f( 2, 1, [2], True, False))\n self.assertFalse( f( 2, 1, [3], True, False))\n self.assertFalse( f( 2, 1, [4], True, False))\n\n self.assertTrue( f(13, 26, [], True, False))\n self.assertTrue( f(13, 0, [26], True, False))\n self.assertTrue( f(13, 14, [12], True, False))\n self.assertTrue( f(13, 13, [10], True, False))\n self.assertFalse( f(13, 15, [11], True, False))\n\n self.assertFalse( f( 6, 1, [2,4,6], True, False))\n self.assertTrue( f( 7, 1, [2,4,6], True, False))\n self.assertFalse( f( 8, 1, [2,4,6], True, False))", "def test_hand_has_three_of_a_kind(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_three_of_a_kind() == expected", "def is_ok_three_lines(line1, line2, line3):\n card1 = line1[0]\n card2 = line1[1]\n card3 = line1[2]\n card4 = line2[0]\n card5 = line2[1]\n card6 = line2[2]\n\n card7 = line3[0]\n card8 = line3[1]\n card9 = line3[2]\n idents1 = [card.ident for card in line1]\n idents2 = [card.ident for card in line2]\n idents3 = [card.ident for card in line3]\n\n intersection = list(set(idents1) & set(idents2))\n if intersection:\n dprint(\"intersection 12\")\n return False\n\n 
intersection = list(set(idents1) & set(idents3))\n if intersection:\n return False\n\n intersection = list(set(idents2) & set(idents3))\n if intersection:\n return False\n\n print(\"??????????????\")\n show_triple(line1, line2, line3)\n print(\"??????????????\")\n\n if not is_ok_two_lines(line1, line2):\n return False\n if not is_ok_two_lines(line2, line3):\n return False\n\n return True", "def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None", "def has_3_spades(self):\n if Card('3', 'spades') in self.hand:\n return True\n return False", "def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def has_three_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 3:\n self.rank_per_hand['2'] = \"three of a kind\"\n return True\n return False", "def is_three_channeled(value):\n return len(value) == 3", "def check_cards(self, cards):\n if len(cards) != 3:\n return False\n\n match = 0\n card1 = cards[0][1]\n card2 = cards[1][1]\n card3 = cards[2][1]\n\n match += self.compare_element(card1, card2, card3, 'shape')\n match += self.compare_element(card1, card2, card3, 'colour')\n match += self.compare_element(card1, card2, card3, 'count')\n match += self.compare_element(card1, card2, card3, 'fill')\n\n return match == 4", "def test_check_three_of_a_kind_false(self):\n not_three_of_a_kind_fixtures = [[1, 2, 3, 4, 5],\n [1, 1, 2, 2, 3],\n [1, 1, 2, 3, 4]\n ]\n\n for fixture in not_three_of_a_kind_fixtures:\n score = self.roll.check_three_of_a_kind(fixture)\n\n self.assertNotEqual(score, sum(fixture))\n self.assertEqual(score, 0)\n self.assertEqual(len(fixture), 5)", "def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)", "def test_partial_deck_doesnt_have_ignored_cards(self):\n self.assertEqual(self.ignoredCardPresent, False)", "def check_sum_three(agent):\n return sum(agent.received[-3:]) == 3", "def test_special_U3(self):\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, -0.1).to_matrix(), \"U3\", {})\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, 0.2).to_matrix(), \"U3\", {\"u3\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.2, 0.0).to_matrix(), \"U3\", {\"u3\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.0, 0.2).to_matrix(), \"U3\", {\"u3\": 1})\n self.check_oneq_special_cases(U3Gate(0.11, 0.27, 0.3).to_matrix(), \"U3\", {\"u3\": 1})", "def check_restricted_allowed_in_deck(deck_format, current_deck, card_name):\n # TODO: Do this\n return False", "def test_partial_deck_has_fewer_cards(self):\n 
self.assertEqual(len(self.partialDeck.deck), 46)", "def is_3flush(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n suit1, suit2 = [card.suit for card in holecards]\n hand = tuple(chain(holecards, flop))\n suit_counts = Counter([card.suit for card in hand])\n\n for suit in suit_counts:\n if suit_counts[suit] == 3:\n if required_holecards == 2 and (suit1 == suit2 == suit):\n return True\n elif required_holecards == 1:\n if (suit1 == suit or suit2 == suit):\n return True\n elif required_holecards == 0:\n return True\n return False", "def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))", "def _is_valid_three_tupple(interface):\n if '/' in interface:\n s = interface.split('/')\n # Length is checked against three because of rbridge,slot,port\n if len(s) != 3:\n LOG.error(_LE(\"_is_valid_three_tupple:\"\n \"invalid interface %s configure\"\n \"valid interface\"), interface)\n return False\n return True\n return False", "def test_check_three_of_a_kind_true(self):\n three_of_a_kind_fixtures = [[1, 1, 1, 1, 1],\n [1, 1, 1, 1, 2],\n [1, 1, 1, 2, 2],\n [2, 1, 1, 1, 2],\n [2, 2, 1, 1, 1],\n [2, 1, 1, 1, 1],\n ]\n\n for fixture in three_of_a_kind_fixtures:\n score = self.roll.check_three_of_a_kind(fixture)\n\n self.assertEqual(score, sum(fixture))\n self.assertEqual(len(fixture), 5)", "def check_valid(self, cards):\n\n if len(cards) == 1: # one card\n return True\n if len(cards) == 2: # two cards\n if ((self.num_to_card(int(cards[0])) == self.num_to_card(int(cards[1]))) or # two same cards\n (int(cards[0]) > 51) or # any card and a joker\n (int(cards[1])) > 51): # any card and a joker\n return True\n return False\n\n # 3 or more: all same number/ascending order\n # check how many jokers\n jokers = 0\n for card in cards:\n #print(int(card))\n #print(self.num_to_card(card))\n if int(card) > 51:\n jokers += 1\n #print(\"YESSSSSSSSSSIR\")\n #print(f'[THERE ARE {jokers} JOKERS]')\n\n # check if all same number\n sort = sorted(cards)\n #print(f'[THE SORTED CARDS: {sort}]')\n index = 0\n for card in sort:\n if self.num_to_card(int(card)) == self.num_to_card(int(sort[0])) or int(card) > 51:\n index += 1\n if index == len(cards):\n return True\n\n # check ascend order\n if not self.is_same_sign(cards):\n print('Here')\n return False\n\n #print(\"accend left\")\n return self.ascend(cards, jokers)", "def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def test_multiple_of_3(self):\n for n in self.multiples_of_3:\n x = Multiple.check_number(n)\n self.assertEqual(x, \"Three\")", "def is_3straight(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n rank1, rank2 = sorted_numerical_ranks(holecards)\n hand = tuple(chain(holecards, flop))\n\n for subseq in rank_subsequences(hand):\n x, y, z = subseq\n if x == y-1 == z-2:\n if x == 1:\n # Special case for Ace playing low, to allow\n # for the `rank in 
subseq` check to work\n subseq.append(14)\n if required_holecards == 2:\n if rank1 in subseq and rank2 in subseq:\n return True\n elif required_holecards == 1:\n if rank1 in subseq or rank2 in subseq:\n return True\n elif required_holecards == 0:\n return True\n return False", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def test_play(self):\n self.plr.piles[Piles.DECK].set(\"Province\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.test_input = [\"keep\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.assertIn(\"Province\", self.plr.piles[Piles.DECK])\n self.assertNotIn(\"Province\", self.plr.piles[Piles.DISCARD])", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_triple_pile_driver(self):\n self.validate_goal_for('game-20121231-142658-fc8f047a.html',\n u'dominion cartel',\n 'TriplePileDriver')", "def test_deal_insufficient_cards(self):\n cards = self.deck._deal(100)\n self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)", "def choose_validator(payload, chosen):\n _has_theme = has_theme(payload[\"cards\"], payload[\"theme\"])\n special_tuple = (\n SkullEnum.WHITE,\n SkullEnum.MERMAID,\n SkullEnum.PIRATE,\n SkullEnum.GREENPIRATE,\n SkullEnum.SKULLKING,\n )\n\n if not chosen.isdecimal():\n print(f\"Choose a number between 1 and {len(payload['cards'])}\")\n return False\n if not (1 <= int(chosen) <= len(payload[\"cards\"])):\n print(f\"Choose a number between 1 and {len(payload['cards'])}\")\n return False\n if (\n _has_theme\n and payload[\"cards\"][int(chosen) - 1].CARDTYPE not in special_tuple\n and payload[\"cards\"][int(chosen) - 1].CARDTYPE != payload[\"theme\"]\n ):\n print(\n f\"You have a card of the theme {payload['theme']}. 
You must choose that card\"\n )\n return False\n\n return True", "def test_create_card_missing_variety(self): # pylint: disable=invalid-name\n data = {\n 'first_name': 'Ty',\n 'last_name': 'Cobb',\n }\n resp = self.app.post('cards', json=data)\n\n assert resp.status_code == 200\n\n assert data['first_name'] == resp.json['first_name']\n assert data['last_name'] == resp.json['last_name']\n assert resp.json['variety'] is None", "def org_clump_tester(clump):\n tester = True\n for block in clump:\n if len(clump) >= 3: # clump should be block!\n tester = False\n break\n return tester", "def test_numcards_is_two(self):\n self.assertEqual(self.hand.numCards, 5)", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def in_suit3(list, list0):\n text = list.replace(\"-\", \"\")\n text0 = list0.replace(\"-\", \"\")\n if (\"-\" in list) and (\"-\" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):\n\n list1 = list.split(\"-\")\n x = int(list1[0])\n suit = set()\n suit.add(x)\n while x < int(list1[len(list1) - 1]):\n x += 1\n suit.add(x)\n suit.add(int(list1[len(list1) - 1]))\n\n list2 = list0.split(\"-\")\n y = int(list2[0])\n suit0 = set()\n suit0.add(y)\n while y < int(list2[len(list2) - 1]):\n y += 1\n suit0.add(y)\n suit0.add(int(list2[len(list2) - 1]))\n temp = [item for item in suit if item in suit0]\n if len(temp) > 0: return True\n\n return False", "def test_numcards_is_two(self):\n self.assertEqual(self.hand.numCards, 2)", "def test_seven_cards_poker(self):\n self.assertEqual(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()),\n ('6C', '7C', '8C', '9C', 'TC'))\n self.assertEqual(best_hand(\"TD TC TH 7C 7D 8C 8S\".split()),\n ('TD', 'TC', 'TH', '8C', '8S'))\n self.assertEqual(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()),\n ('JD', '7C', '7D', '7S', '7H'))", "def third_street ():\r\n global all_hands\r\n global deck\r\n global players\r\n #Set of all cards for third street draw \r\n third_street_draws = random.sample(deck, len(players)*3)\r\n #Remove drawn cards from deck\r\n for card in third_street_draws:\r\n deck.remove(card)\r\n #Deal 1 Card Each Player Until 3, then reveal third street.\r\n for player in players:\r\n hand = []\r\n for i in range(0,3):\r\n hand.append(third_street_draws[player+len(players)*i])\r\n all_hands.append(hand)\r\n if player == you:\r\n print(\"Your hand is: \", str(all_hands[you]))\r\n else:\r\n print(\"Player \", str(player+1), \"'s 3rd Street hand is: \", str(hand[2]))", "def isLegal(self):\n counter = 0\n for t in self.types:\n if t > 0:\n counter = counter + 1\n if counter < 4:\n return True\n else:\n return False", "def test_hand_is_four_of_a_kind(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.is_four_of_a_kind() == expected", "def test_deck_validation(self):\n \tpass", "def test_mano_3():\n g = get_card_by_description\n r = Round([\n [g(\"3 de Oro\"), g(\"5 de Oro\"), g(\"6 de Oro\")],\n [g(\"3 de Espada\"), g(\"5 de Basto\"), g(\"6 de Oro\")],\n ])\n assert r.result() == 1", "def check_deck_legality(deck: Deck, tourney_format: str = tourney_format):\n if tourney_format == \"commander\":\n main, side = deck\n main_count = count_dict(main)\n side_count = count_dict(side)\n\n # Check count\n if not (98 <= main_count <= 100):\n raise ValueError(\n 
\"Number of cards in main ({}) is illegal\".format(main_count)\n )\n if not (0 <= side_count <= 3):\n raise ValueError(\n \"Number of cards in side ({}) is illegal\".format(side_count)\n )\n if not (100 <= main_count + side_count <= 101):\n raise ValueError(\n \"Total number of cards ({}) is illegal\".format(\n main_count + side_count\n )\n )\n\n max_color_id = get_max_color_id(deck)\n\n # check legality of cards\n combined = combine_main_side(main, side)\n\n has_uncard = False\n for card_name in combined:\n card_data = scryfall_cache.get_card_data(card_name.split(\"(\")[0].strip())\n count = combined[card_name.split(\"(\")[0].strip()]\n try:\n scryfall_utils.card_ok(\n card_data,\n count,\n tourney_format=tourney_format,\n has_color_id=True,\n color_id=max_color_id,\n )\n except ValueError as err:\n if (\n scryfall_utils.is_in_unset(card_data)\n and not has_uncard\n and count == 1\n and card_name in side\n ):\n # we allow one\n has_uncard = True\n else:\n raise err\n\n else:\n raise NotImplementedError(\n \"Format {} is not supported\".format(tourney_format)\n )\n\n return True", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def naive_partition3(nums: List[int]) -> bool:\n target, remaining = divmod(sum(nums), 3)\n if remaining:\n return False\n\n def sum_subset3(nums: List[int], n: int, a: int, b: int, c: int) -> bool:\n if a == 0 and b == 0 and c == 0:\n return True\n if n < 0:\n return False\n\n used_in_a = used_in_b = used_in_c = False\n\n if a - nums[n] >= 0:\n used_in_a = sum_subset3(nums, n - 1, a - nums[n], b, c)\n\n if not used_in_a and b - nums[n] >= 0:\n used_in_b = sum_subset3(nums, n - 1, a, b - nums[n], c)\n\n if (not used_in_a and not used_in_b) and c - nums[n] >= 0:\n used_in_c = sum_subset3(nums, n - 1, a, b, c - nums[n])\n\n return used_in_a or used_in_b or used_in_c\n\n return sum_subset3(nums, len(nums) - 1, target, target, target)", "def test_get_scorable_3pair():\n roll = np.array([1, 1, 5, 5, 2, 2])\n expected = {\n \"one\": 2,\n \"five\": 2,\n \"three-ones\": False,\n \"three-twos\": False,\n \"three-threes\": False,\n \"three-fours\": False,\n \"three-fives\": False,\n \"three-sixes\": False,\n \"four-of-a-kind\": False,\n \"three-and-one\": False,\n \"five-of-a-kind\": False,\n \"six-of-a-kind\": False,\n \"straight\": False,\n \"three-pairs\": True,\n \"four-and-pair\": False,\n \"triplets\": False,\n }\n actual = analyze_roll.get_scorable(roll)\n assert expected == actual", "def is_ok_line(line):\n card1 = line[0]\n card2 = line[1]\n card3 = line[2]\n\n if not is_coupled(card1.east, card2.west):\n return False\n if not is_coupled(card2.east, card3.west):\n return False\n return True", "def is_valid_deck(deck: List[int]) -> bool:\n check_deck = []\n check_deck.extend(deck)\n check_deck.sort()\n return len(check_deck) >= 3 and \\\n all(isinstance(item, int) for item in check_deck) \\\n and len(check_deck) == check_deck[-1]", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "def test_make_3bit_errors(self):\r\n bitvecs = golay._make_3bit_errors()\r\n self.assertTrue(list([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) in map(list, 
bitvecs))\r\n self.assertFalse(list([0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0]) in map(list, bitvecs))", "def useless(input1, input2, input3):\n return 3", "def test_for_non_splittable_hand(self):\n hand = self._hand\n cards = [BjCard('clubs', '7'), BjCard('diamonds', '4')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, False)", "def test_check_category_input_3(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(2, choices) == True", "def differentiate_cards(card):\n\t\tdef High_Card(numbers,colors):\n\t\t\treturn len(set(numbers)) == 5\n\t\tdef One_Pair(numbers,colors):\n\t\t\treturn len(set(numbers)) == 4\n\t\tdef Two_Pairs(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\treturn [numbers.count(i) for i in numbers].count(2) == 4\n\t\tdef Three_of_a_Kind(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\tfor i in numbers:\n\t\t\t\tif numbers.count(i) == 3:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight(numbers,colors):\n\t\t\tfor i in xrange(1,len(numbers)):\n\t\t\t\tif numbers[i] - numbers[i-1] != 1:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\tdef Flush(numbers,colors):\n\t\t\treturn len(set(colors)) == 1\n\t\tdef Full_House(numbers,colors):\n\t\t\tnumbers_set = set(numbers)\n\t\t\tif len(numbers_set) != 2:\n\t\t\t\treturn False\n\t\t\ta = numbers[0]\n\t\t\tb= [x for x in numbers if x != a][0]\n\t\t\treturn (numbers.count(a) == 2 and numbers.count(b) == 3) or\\\n\t\t\t\t(numbers.count(a) == 3 and numbers.count(b) == 2)\n\t\tdef Four_of_a_Kind(numbers,colors):\n\t\t\tfor i in set(numbers):\n\t\t\t\tif numbers.count(i) == 4:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight_Flush(numbers,colors):\n\t\t\treturn Straight(numbers,colors) and Flush(numbers,colors)\n\t\tdef Royal_Flush(numbers,colors):\n\t\t\tRoyal = [10,11,12,13,14]\n\t\t\treturn numbers == Royal and Flush(numbers,colors)\n\n\t\tcards = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,\n\t\t 'T':10,'t':10,'J':11,'j':11,'Q':12,'q':12,'K':13,'k':13,'A':14,'a':14}\n\t\tnumbers = [cards[i[0]] for i in card]\n\t\tnumbers.sort()\n\t\tcolors = [i[1] for i in card]\n\t\t\n\t\tif Royal_Flush(numbers,colors):return 9\n\t\telif Straight_Flush(numbers,colors):return 8\n\t\telif Four_of_a_Kind(numbers,colors):return 7\n\t\telif Full_House(numbers,colors):return 6\n\t\telif Flush(numbers,colors):return 5\n\t\telif Straight(numbers,colors):return 4\n\t\telif Three_of_a_Kind(numbers,colors):return 3\n\t\telif Two_Pairs(numbers,colors):return 2\n\t\telif One_Pair(numbers,colors):return 1\n\t\telif High_Card(numbers,colors):return 0", "def one_in_three():\n chance = random.randrange(0, 3)\n return chance", "def test_3(self):\n c1 = Store.Customer(\"harold\", \"qcf\", True)\n self.assertTrue(c1.is_premium_member(), \"not premium member\")", "def can_be_played(cls, card, context={}):\n\t\treturn True", "def multi_player_support(self, num_of_players):\n if self.screen['columns'] / num_of_players > 40:\n return True\n else:\n return False", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)", "def is_proper(i0, i1, i2, i3, bond_set):\n if (i0, i1) in bond_set and (i1, i2) in bond_set and (i2, i3) in bond_set and len(set([i0, i1, i2, i3])) == 4:\n return True\n return False", "def test_validate_available_choice_3(self):\n 
self.assertIsNone(validate_available_choice(BeerStyle, BeerStyle.LAGER))", "def corrected_clump_tester(clump):\n tester = True\n for block in clump:\n if len(block) >= 3: # Fixed block!\n tester = False\n break\n return tester", "def test_main_ignCH3(self, capsys):\n args = self.args.copy()\n args[\"ignore_CH3s\"] = True\n UI.main(**args)\n captured = capsys.readouterr().out\n assert \"(--ignore-CH3s activated)\" in captured", "def test_4(self):\n c1 = Store.Customer(\"harold\", \"qcf\", False)\n self.assertFalse(c1.is_premium_member(), \"IS premium member\")", "def test_three_arms_one_unsampled_arm(self):\n self._test_three_arms_one_unsampled_arm()", "def check_for_combat():\n if random.randint(1, 4) == 1:\n return True\n else:\n return False", "def check_cart(cart):\n return 0 <= cart[0] < grid_size and 0 <= cart[1] < grid_size and 0 <= cart[2] < grid_size", "def enough_players():\n return True", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def test_check_four_of_a_kindfalse(self):\n not_four_of_a_kind_fixtures = [[1, 1, 1, 2, 2],\n [2, 1, 1, 1, 2],\n [2, 2, 1, 1, 1],\n [1, 2, 3, 4, 5],\n ]\n\n for fixture in not_four_of_a_kind_fixtures:\n score = self.roll.check_four_of_a_kind(fixture)\n\n self.assertNotEqual(score, sum(fixture))\n self.assertEqual(score, 0)\n self.assertEqual(len(fixture), 5)", "def test_for_splittable_hand_with_aces(self):\n hand = self._hand\n cards = [BjCard('clubs', 'A'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, True)", "def is_valid(user_input, card_type=None, skip=False):\n \n i = user_input.upper()\n if i == 'Q':\n exit(\"\\nExiting program. 
Thanks for using Clue Detective!\\n\")\n if skip:\n if i == 'X':\n return True\n if card_type:\n key_list = [key for key in Board.input_decoder \n if Board.input_decoder[key].type == card_type]\n if i in key_list:\n return True\n elif not card_type:\n if i in Board.input_decoder:\n return True \n else:\n return False", "def test_special_contacts(self):\n\n vcards = []\n\n # Generate a contact with no email address\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with the same name but different phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with no name\n current_contact = bt_contacts_utils.VCard()\n current_contact.email = \"{}@gmail.com\".format(\n bt_contacts_utils.generate_random_string())\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with random characters in its name\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = bt_contacts_utils.generate_random_string()\n current_contact.last_name = bt_contacts_utils.generate_random_string()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n bt_contacts_utils.create_new_contacts_vcf_from_vcards(\n self.contacts_destination_path, PSE_CONTACTS_FILE, vcards)\n\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n\n return self.connect_and_verify(phone_numbers_added)", "def require_either_cards(card_names: List[str], log: SimpleLogger,\n soltype: str, sol: int, subcase: Subcase,\n error, ierror: int, nerrors):\n msg = ''\n nlocal_errors = 0\n for card_name in card_names:\n if card_name not in subcase:\n msg += f'A {card_name} card is required for {soltype} - SOL {sol:d}\\n{subcase}'\n nlocal_errors += 1\n if nlocal_errors == len(card_names):\n ierror += 1\n log.error(msg)\n if ierror == nerrors:\n raise error(msg)\n return ierror", "def test_four_kind(self):\n self.assertEqual(poker([self.fk, self.fh]), [self.fk])", "def is_valid_retrieval(self, card_index):\n return card_index == 0", "def is_valid_retrieval(self, card_index):\n return card_index == 0", "def is_fivefold_repetition(self) -> bool:\n return self.is_repetition(3)", "def test_three_arms_two_winners(self):\n self._test_three_arms_two_winners()", "def checkDoubles(self,card): # need to check defenders handcount...\n multipleCards = [card]\n for i in range(4): # checking all other possible cards of same rank\n card_plus = card + 13 * i # checking higher values\n card_minus = card - 13 * i # checking lower values\n if 
card_plus in self.currentHand and card_plus < 51 and card_plus != card and card_plus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt= input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_plus)\n self.currentHand.remove(card_plus)\n else:\n print(\"Did not add\")\n if card_minus in self.currentHand and card_minus > 0 and card_plus != card and card_minus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_minus)\n self.currentHand.remove(card_minus)\n else:\n print(\"Did not add\")\n return multipleCards", "def validate_parameters(side_1, side_2, side_3):\n if side_1 > 0 and side_2 > 0 and side_3 > 0 and (side_1 + side_2 > side_3) and \\\n (side_1 + side_3 > side_2) and (side_3 + side_2 > side_1):\n return True\n else:\n return False", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "async def should_handle(self):\n return self.main.base_amount > 4 and self.main.can_build_unique(UnitTypeId.INFESTATIONPIT, self.main.pits)", "def require_cards(card_names: List[str], log: SimpleLogger,\n soltype: str, sol: int, subcase: Subcase,\n error, ierror, nerrors):\n for card_name in card_names:\n if card_name not in subcase:\n msg = f'A {card_name} card is required for {soltype} - SOL {sol:d}\\n{subcase}'\n log.error(msg)\n if ierror == nerrors:\n raise error(msg)\n ierror += 1\n return ierror", "def is_int3(items):\n return len(items) == 3 and all(isinstance(item, int) for item in items)", "def test_creature(self):\n self.assertEqual(len(self.processor), 3)", "def is_valid_deck(deck):\n \n flag = True\n test_deck = []\n for i in range(1, len(deck) + 1):\n test_deck.append(i)\n for value in deck:\n if value not in test_deck:\n flag = False\n return flag", "def test_Utilities__test_3():\n assert test(False, 1) is None\n assert test(False, 1, False, 2) is None", "def can_complete_three_in_row(self, row_positions, board):\n\n row = [board.get_piece(row_positions[0][0], row_positions[0][1]), board.get_piece(row_positions[1][0], row_positions[1][1]), board.get_piece(row_positions[2][0], row_positions[2][1])]\n\n if row.count(' ') == 1 and row.count(self._piece) == 2:\n self_winner = row.index(' ')\n else:\n self_winner = -1\n\n\n if row.count(' ') == 1 and row.count(self._piece) == 0:\n opponent_winner = row.index(' ')\n else:\n opponent_winner = -1\n \n return (self_winner, opponent_winner)", "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == 
\"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False", "def test_fav_3(self):\n\t\tplayer_list = [Player(\"Kevin Knuckler\", 1, 100000, 10), Player(\"Larry Left\", 2, 100000, 20), Player(\"Morgan Mariner\", 2, 300000, 50), Player(\"Nick National\", 4, 200000, 70), Player(\"Oscar Outfield\", 1, 700000, 99), Player(\"Peter Pitcher\", 3, 100000, 5), Player(\"Quinn Quality\", 6, 400000, 44), Player(\"Ricky Right\", 7, 200000, 69)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 1000000, 7), (213, 1000000, [\"Nick National\", \"Ricky Right\", \"Larry Left\", \"Quinn Quality\", \"Kevin Knuckler\" ]) )", "def test_get_scorable_three_and_1():\n roll = np.array([1, 1, 1, 1, 2, 5])\n expected = {\n \"one\": 4,\n \"five\": 1,\n \"three-ones\": False,\n \"three-twos\": False,\n \"three-threes\": False,\n \"three-fours\": False,\n \"three-fives\": False,\n \"three-sixes\": False,\n \"four-of-a-kind\": False,\n \"three-and-one\": True,\n \"five-of-a-kind\": False,\n \"six-of-a-kind\": False,\n \"straight\": False,\n \"three-pairs\": False,\n \"four-and-pair\": False,\n \"triplets\": False,\n }\n actual = analyze_roll.get_scorable(roll)\n assert expected == actual", "def test_does_suit_error(self):\n self.assertRaises(Exception,lambda: cardutils.Card(10,0))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), 
self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))" ]
[ "0.6810119", "0.6473808", "0.645677", "0.62938344", "0.62752783", "0.62059915", "0.6155211", "0.6148065", "0.60892385", "0.59840876", "0.596696", "0.5963209", "0.59501404", "0.5799793", "0.5697761", "0.5672481", "0.566434", "0.561755", "0.56103486", "0.5594359", "0.5582937", "0.55824095", "0.55776405", "0.5561703", "0.55474144", "0.5544486", "0.5530489", "0.5520645", "0.55172414", "0.54890114", "0.5468481", "0.54464144", "0.5441283", "0.54393774", "0.54274714", "0.5424063", "0.54138094", "0.5382392", "0.5367519", "0.5359802", "0.5358253", "0.5329416", "0.53261393", "0.5316245", "0.53097856", "0.5306796", "0.5290076", "0.52821606", "0.52812386", "0.5251884", "0.52460605", "0.52431774", "0.524229", "0.523195", "0.52286994", "0.52267295", "0.52143997", "0.5197196", "0.51955044", "0.5185743", "0.51828885", "0.5182067", "0.5180415", "0.51786554", "0.5177874", "0.5174064", "0.51681215", "0.51644677", "0.5164138", "0.5162975", "0.51608557", "0.5160479", "0.51584834", "0.51578593", "0.5150863", "0.5134784", "0.5131233", "0.5128404", "0.5124589", "0.5124589", "0.51126343", "0.5104829", "0.5099368", "0.50973964", "0.5094708", "0.50920314", "0.50905526", "0.5083186", "0.5080864", "0.5073877", "0.5068215", "0.5065914", "0.5061753", "0.50600374", "0.5057748", "0.50513214", "0.50505924", "0.5047971", "0.5047971", "0.5047971" ]
0.7429536
0
Test with only two-card petitions
def test_only_two_card_petitions(self):
    f = gtrutils.check_petition_combos

    self.assertTrue( f( 0, 0, [0], True, False))

    self.assertFalse( f( 1, 0, [], True, False))
    self.assertFalse( f( 1, 0, [1], True, False))
    self.assertTrue( f( 1, 0, [2], True, False))
    self.assertFalse( f( 1, 0, [3], True, False))
    self.assertFalse( f( 1, 0, [4], True, False))

    self.assertTrue( f( 1, 1, [], True, False))
    self.assertFalse( f( 1, 1, [2], True, False))

    self.assertFalse( f( 2, 0, [2], True, False))
    self.assertFalse( f( 2, 0, [3], True, False))
    self.assertTrue( f( 2, 0, [4], True, False))
    self.assertFalse( f( 2, 0, [5], True, False))

    self.assertTrue( f( 2, 1, [2], True, False))
    self.assertFalse( f( 2, 1, [3], True, False))
    self.assertFalse( f( 2, 1, [4], True, False))

    self.assertTrue( f(13, 26, [], True, False))
    self.assertTrue( f(13, 0, [26], True, False))
    self.assertTrue( f(13, 14, [12], True, False))
    self.assertTrue( f(13, 13, [10], True, False))
    self.assertFalse( f(13, 15, [11], True, False))

    self.assertFalse( f( 6, 1, [2,4,6], True, False))
    self.assertTrue( f( 7, 1, [2,4,6], True, False))
    self.assertFalse( f( 8, 1, [2,4,6], True, False))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testBeliefs1sk(self):", "def test_when_opponent_all_Ds(self):\n self.responses_test([C, C, C, C], [D, D, D, D], [D, D, D], random_seed=5)", "def test_three_arms_two_winners(self):\n self._test_three_arms_two_winners()", "def test_theft_and_stealing(self):", "def test_when_opponent_all_Ds(self):\n self.responses_test([D, D, D, D], [D, D, D, D], [D, D, D],\n random_seed=5)", "def test_affect_of_strategy(self):\n self.responses_test([C, C, C], [C, C, C], [C, C, C])\n # Make sure that the retaliations are increasing\n # Retaliate once and forgive\n self.responses_test([C], [D], [D])\n self.responses_test([C, D], [D, C], [C])\n self.responses_test([C, D, C], [D, C, C], [C])\n # Retaliate twice and forgive\n self.responses_test([C, D, C], [D, C, D], [D, D])\n self.responses_test([C, D, C, D, D], [D, C, D, C, C], [C])\n # Opponent defection during retaliation doesn't increase retaliation period\n self.responses_test([C, D, C, D, D], [D, C, D, D, C], [C])\n # Retaliate thrice and forgive\n self.responses_test([C, D, C, D, D, C], [D, C, D, C, C, D], [D, D, D])\n history_1 = [C, D, C, D, D, C, D, D, D]\n history_2 = [D, C, D, C, C, D, C, C, C]\n self.responses_test(history_1, history_2, [C])", "def test_apply_endorsements(self):", "def test_solareclipses_get(self):\n pass", "def testBeliefs2sk(self):", "def test_01_lighting(self):", "def test_art_from_taste_space(self):", "def test_strategy(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C])\n self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])\n self.responses_test([C] * 11, [C] * 10 + [D], [C])", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def test_multiple_commands_at_same_time(self):", "def test_actor_matches_activity(self):", "def test_find_naked_twins(self):\n self.assertEqual(solution.find_naked_twins(self.before_naked_twins_1), self.before_naked_twins_1_boxes)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n 
self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def test_poets_get(self):\n pass", "def test_two_unsampled_arms(self):\n self._test_two_unsampled_arms()", "def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)", "def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)", "def test_post_foods(self):\n pass", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def test_change_provisioned_throughput_usual_case():", "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def multishot(attacker_schema, victim_schema):\n\n multishot = attacker_schema.multishot.get(victim_schema.name, 0)\n return multishot > 0 and (multishot - 1.0) / multishot > random.random()", "def test_counter_proposal_offer(self):\n pass", "def test_odd(self):", "def test_mandir(self):\n self.chck_triple('mandir')", "def test_actionWithNoTargetInDarkRoom(self):\n self._test(\n \"wear pants\",\n [\"It's too dark to see.\"], # to dark to see... the pants? any pants?\n [])", "def test_asteroid_or_comet():\n for comet in comets:\n assert Names.asteroid_or_comet(comet) == 'comet', \\\n 'failed for {}'.format(comet)\n for asteroid in asteroids:\n if asteroid != '2017 U1':\n assert Names.asteroid_or_comet(asteroid) == 'asteroid', \\\n 'failed for {}'.format(asteroid)", "def test_hand_has_two_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_two_pair() == expected", "def beats(self, one, two):\n return ((one == 'rock' and two == 'scissors') or\n (one == 'scissors' and two == 'paper') or\n (one == 'paper' and two == 'rock'))", "def testOddPlayersWithBye():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Alien 1\")\n registerPlayer(\"Alien 2\")\n registerPlayer(\"Alien 3\")\n standings = playerStandings()\n [id1, id2, id3] = [row[0] for row in standings]\n reportMatch(id1, id2)\n reportMatch(id3, id3, False, True)\n reportMatch(id2, id3)\n reportMatch(id1, id1, False, True)\n\n # id1 and id3 have one bye each. In this round, bye should be given to id2\n pairings = swissPairings()\n for pairing in pairings:\n if pairing[0]!=id2 and pairing[0]==pairing[2]:\n raise ValueError(\n \"swissPairings() should not award bye to a player who already\"\n \"has a bye.\"\n )\n if pairing[0]==id2 and pairing[2]!=id2:\n raise ValueError(\n \"swissPairings() has to award a bye when there is an odd number\"\n \"of players.\"\n )\n print \"2. 
Bye is not given to a player who already has a bye.\"", "def test_vp_mark2_complex(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.bob, 30) # Attack with 30 -> 8vp\n s2 = s1.react(self.alice, 15,\n troop_type=\"cavalry\") # Oppose with 30 -> 7vp\n s3 = s2.react(self.bob, 14) # Oppose with 14 -> 1vp\n s3.react(self.alice, 1) # Oppose with 1\n\n s4 = s1.react(self.dave, 10, hinder=False) # Support with 10 -> 10vp\n s4.react(self.carol, 15) # Oppose with 15\n\n result = s1.resolve()\n self.assertEqual(result.victor, self.bob.team)\n\n # 10 because the 1 VP for s3 counts now, and the 1 extra lowers the\n # number of troops bob opposes with, which increases the number of\n # troops in s2, which increases the VP it's worth.\n self.assertEqual(result.vp, 10)", "def test_two_game(self):\n self.choice.side_effect = [\"ant\", \"baboon\"]\n self.input.side_effect = list(\"ant\" \"y\" \"babon\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')\n self.xprint.assert_any_call('Yes! The secret word is \"baboon\"! '\n 'You have won!')", "def first_let_caps(self):\n \n arg = '@TWEET!et test case'\n actual = tweets.extract_mentions(arg)\n expected = ['tweet']\n msg = \"Expected {}, but returned {}\".format(expected, actual)\n self.assertEqual(actual, expected, msg)", "def test_twenty_rounds_joss_then_tft_for_noncyclers(self):\n seed = 4\n match = axl.Match(\n (axl.FirstByJoss(), axl.AntiCycler()), turns=20, seed=seed\n )\n match.play()\n expected_actions = match.result + [\n (C, C),\n (C, C),\n (C, D),\n (D, C),\n (C, C),\n ]\n self.versus_test(\n axl.AntiCycler(),\n expected_actions=expected_actions,\n seed=seed,\n turns=24,\n )", "def test_emirp_check():\r\n pass", "def test_T2():", "def test_T2():", "def test_strategy(self):\n self.first_play_test(C)", "def testGetMultiplePetitionsById():\n\tapi = c.Api()\n\toutput = api.getMultiplePetitionsById([2297756, 1756395])\n\tif type(output) is list:\n\t\tassert True", "def test_secondary(self):\n st = ServiceTicketFactory()\n self.assertFalse(st.is_primary())", "def test_fav_2(self):\n\t\tplayer_list = [Player(\"Eric Eephus\", 1, 200000, 10), Player(\"Fred First\", 2, 100000, 20), Player(\"Gary Glove\", 2, 200000, 50), Player(\"Henry Hitter\", 4, 300000, 70), Player(\"Ian Inning\", 1, 100000, 30), Player(\"Jack Junk\", 3, 200000, 5)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 800000, 4), (155, 800000, [\"Ian Inning\", \"Gary Glove\", \"Henry Hitter\", \"Jack Junk\"]) )", "def test_manlext(self):\n self.chck_triple('manlext')", "def check():\n suspicious_telemarketers = get_suspicious_telemarketers(calls, texts)\n outgoing = set()\n non_tele = set()\n for c in calls:\n outgoing.add(c[0])\n non_tele.add(c[1])\n for t in texts:\n non_tele.add(t[0])\n non_tele.add(t[1])\n telemarketers = sorted(outgoing - non_tele)\n if len(suspicious_telemarketers) == len(telemarketers):\n print('Pass')", "def test_outE_traverals(self):\r\n results = self.blake.outE()\r\n assert len(results) == 1\r\n assert self.blake_in_theoretics in results", "def test_get_scorable_twos_34okp():\n roll = np.array([2, 2, 2, 2, 6, 6])\n expected = {\n \"one\": 0,\n \"five\": 0,\n \"three-ones\": False,\n \"three-twos\": False,\n \"three-threes\": False,\n \"three-fours\": False,\n \"three-fives\": False,\n \"three-sixes\": False,\n \"four-of-a-kind\": True,\n \"three-and-one\": False,\n \"five-of-a-kind\": False,\n \"six-of-a-kind\": False,\n \"straight\": False,\n \"three-pairs\": False,\n \"four-and-pair\": 
True,\n \"triplets\": False,\n }\n actual = analyze_roll.get_scorable(roll)\n assert expected == actual", "def test_strategy(self):\n self.responses_test([], [], [C], random_seed=1)\n self.responses_test([], [], [D], random_seed=2)", "def test_fav_4(self):\n\t\tplayer_list = [Player(\"Sam Slinger\", 1, 100000, 10), Player(\"Terry Toss\", 6, 100000, 20), Player(\"Urvine Umpire\", 2, 200000, 50), Player(\"Victor Vorp\", 10, 200000, 60), Player(\"Wesley Walk\", 9, 100000, 99)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 300000, 5), (60, 300000, [\"Urvine Umpire\", \"Sam Slinger\"]) )", "def substantiate():", "def test_bothV_traversals(self):\r\n results = self.blake.bothV()\r\n assert len(results) == 2\r\n assert self.beekeeping in results", "def test_two_full_house(self):\n self.assertEqual(poker([self.fh, self.fh]), [self.fh, self.fh])", "def test_tie_when_both_hands_are_straightflush():\n from poker_rankings import PokerHand\n heroes_hand = PokerHand(\"5H 4H 3H 2H AH\")\n villains_hand = PokerHand(\"5H 4H 3H 2H AH\")\n heroes_hand.compare_with(villains_hand) == 'Tie'", "def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))", "def test_solo_basic(self):\n self.assertEqual(self.solo.artist, 'Oscar Peterson')\n self.assertEqual(self.solo.end_time, '4:06')", "def test_poker_two_sf(self):\n self.assertEqual(\n poker([self.sf1, self.sf2, self.fk, self.fh]),\n [self.sf1, self.sf2])", "def test_dvidir(self):\n self.chck_triple('dvidir')", "def test_only_matches(self):\n completions = flask_server.get_completions(\"zy\", 5)\n self.assertEqual(completions, ['zydeco', 'zygote', 'zygotic', 'zymurgy'])", "def test_twopair_properties(self):\n self.assertEqual(self.hand.pair1Rank, 14)\n self.assertEqual(self.hand.pair2Rank, 4)\n self.assertEqual(self.hand.postHandType, 8)\n self.assertEqual(self.hand.postHandValue, 219)", "def 
test_one_trick_pony(self):\n self.validate_goal_for('game-20120625-114828-af02f875.html',\n u'WanderingWinder',\n 'OneTrickPony')", "def test_get_boat(self):\n pass", "def _consonance_test(self, testfunc, param=None):\n n = list(self.notes)\n while len(n) > 1:\n first = n[0]\n for second in n[1:]:\n if param is None:\n if not testfunc(first.name, second.name):\n return False\n else:\n if not testfunc(first.name, second.name, param):\n return False\n n = n[1:]\n return True", "def test_even(self):", "def test_pasture_intercon():\n a = Pasture((0, 1))\n b = Pasture((1, 1))\n c = Pasture((1, 2))\n d = Pasture([(0, 3), (0, 4)])\n e = Pasture((0, 2))\n f = Pasture([(1, 2), (1, 3)])\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures(a)\n p.build_pastures(b)\n p.build_pastures(c)\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures([a, b, c])\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures([a, b])\n p.build_pastures(c)\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures([b, c])\n p.build_pastures(a)\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures(b)\n p.build_pastures([a, c])\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures(c)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures(a)\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures(a)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures(c)\n\n p = Player(\"p0\", wood=20, rooms=[])\n with pytest.raises(AgricolaLogicError):\n p.build_pastures([a, c])\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures(d)\n p.build_pastures(e)\n\n p = Player(\"p0\", wood=20, rooms=[])\n p.build_pastures(d)\n p.build_pastures(f)\n\n p = Player(\"p0\", wood=20, fences_avail=17, rooms=[])\n p.build_pastures([a, b, c, d, e])\n\n p = Player(\"p0\", wood=20, fences_avail=17, rooms=[])\n with pytest.raises(AgricolaLogicError):\n p.build_pastures([a, b, c, d])", "def isspeech(phone):\n return phone not in OTHERS", "def test_bothE_traversals(self):\r\n results = self.jon.bothE()\r\n assert len(results) == 2\r\n assert self.jon_physics in results\r\n assert self.jon_in_beekeeping in results", "def test_selecting_only_audio_episodes(\n only_audio_episodes: List[LepEpisode],\n) -> None:\n assert len(only_audio_episodes) == 14 # Without duplicates", "def test_others(self):\n outputAssert = self.buildingTests([\"Hola me gust@ programar en ICC 1.03\"])\n self.assertTrue((outputAssert[0][4] == outputAssert[1][4] and outputAssert[0][5] == outputAssert[1][5]) ^ (outputAssert[0][4] == outputAssert[1][5]) , f\"El resultado debería ser: \\\"{outputAssert[1][5]}\\\"\")", "def do_tests(n, s, d, t):\n for i in range(t):\n if is_composite(n, s, d):\n return False\n return True", "def test_three_arms_one_unsampled_arm(self):\n self._test_three_arms_one_unsampled_arm()", "def test_vp_mark2(self):\n # Test of the VP system as outlined at http://redd.it/2k96il\n battle = self.battle\n s1 = battle.create_skirmish(self.bob, 30) # Attack with 30 -> 8vp\n s2 = s1.react(self.alice, 15,\n troop_type=\"cavalry\") # Oppose with 30 -> 7vp\n s2.react(self.bob, 14) # Oppose with 14\n\n result = s1.resolve()\n self.assertEqual(result.victor, self.bob.team)\n # Old way adds up VP, make sure that's not happening\n self.assertNotEqual(result.vp, 22)\n\n # New way only adds up VP for winning side\n # (8vp because the 15 in s2 was reduced to 8)\n self.assertEqual(result.vp, 8)\n self.assertEqual(result.vp, result.vp_for_team(self.bob.team))\n\n # What if the other side had won?\n 
self.assertEqual(result.vp_for_team(self.alice.team), 14)", "def check_collisions(self):", "def test_fav_5(self):\n\t\tplayer_list = [Player(\"Xavier X\", 1, 100000, 10), Player(\"Zane Zero\", 2, 100000, 20), Player(\"Alex Athlete\", 3, 200000, 50)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 900000, 3), (80, 400000, [\"Alex Athlete\", \"Zane Zero\", \"Xavier X\"]) )", "def test_escalation_of_an_article_twice(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_twice()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"error\"], self.report_twice)", "def second_chance(pet_list):\n message = \"Sorry, we don't have this pet in our shop!\" \\\n \" Would you consider adopting one of these cuties instead: {}.\" \\\n .format(pet_list)\n return message, pet_list", "def test_T01():", "def test_fav_6(self):\n\t\tplayer_list = [Player(\"Blake Base\", 1, 300000, 10), Player(\"Corey Catcher\", 2, 500000, 20), Player(\"Dexter Dugout\", 3, 200000, 50)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 100000, 4), (0, 0, []) )", "def test_observatory(lasco):\n assert lasco.observatory == \"SOHO\"", "def test_02_visit_again(self):", "def test_T3():", "def test_T3():", "def test_strategy(self):\n self.first_play_test(C)\n for i in range(10):\n history_1 = [C] * i\n history_2 = [C] * i\n self.responses_test(history_1, history_2, [C])\n # Now cooperate 10% less than opponent\n history_1 = [C] * 11\n history_2 = [D] * 11\n self.responses_test(history_1, history_2, [D], random_seed=10)\n history_1 = [C] * 11\n history_2 = [D] * 10 + [C]\n self.responses_test(history_1, history_2, [D], random_seed=10)\n # Test beyond 10 rounds\n history_1 = [C] * 11\n history_2 = [D] * 5 + [C] * 6\n self.responses_test(history_1, history_2, [D, D, D, D], random_seed=20)\n history_1 = [C] * 11\n history_2 = [C] * 9 + [D] * 2\n self.responses_test(history_1, history_2, [C, D, D, C], random_seed=25)", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def find_all_ORFs_both_strands_unit_tests():\n\n # YOUR IMPLEMENTATION HERE", "def test_numcards_is_two(self):\n self.assertEqual(self.hand.numCards, 5)", "def test_03_visit_special(self):", "def test_numcards_is_two(self):\n self.assertEqual(self.hand.numCards, 2)", "def testNSESanityChecks(self):\n self.assertEqual(100, self.c.get_species_richness())\n self.assertEqual(67, self.c2.get_species_richness())", "def test_T4():", "def test_T4():", "def test_kyc_post_legal(self):\n pass", "def test_genius(self):\n bad_res = lw.get_lyrics('genius', 'eminem', 'los yourself')\n good_res = lw.get_lyrics('genius', 'eminem', 'lose yourself')\n self.assertEqual(bad_res, 404)\n self.assertTrue(good_res)", "def test_get_player_battles(self):\n pass", "def test_intent_classifier_vaporise(self):\n pass", "def has_twopair(self):\n count = 0\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n count += 1\n if count >= 2:\n self.rank_per_hand['1'] = \"two pair\"\n return True\n return False" ]
[ "0.6394057", "0.63925785", "0.63051224", "0.62454295", "0.62086844", "0.61224383", "0.60351664", "0.6014525", "0.599899", "0.5914783", "0.5904088", "0.58920974", "0.5878094", "0.5875978", "0.58667856", "0.5842628", "0.5808113", "0.5807632", "0.5807632", "0.5807632", "0.57639617", "0.5761038", "0.5725071", "0.5725071", "0.5724038", "0.5721632", "0.5711901", "0.5705524", "0.5691305", "0.5686428", "0.5645553", "0.5621175", "0.56087196", "0.5608002", "0.56078696", "0.55937064", "0.5582451", "0.55795753", "0.55606663", "0.5543689", "0.554025", "0.553457", "0.5533502", "0.5533502", "0.55309373", "0.55284345", "0.55181813", "0.55144405", "0.5501708", "0.5491728", "0.54907817", "0.5487248", "0.5486302", "0.547731", "0.54766715", "0.5474589", "0.54737514", "0.5472107", "0.54711044", "0.54706866", "0.5464367", "0.54599595", "0.54569846", "0.54567623", "0.5441313", "0.5437427", "0.5430859", "0.5427694", "0.54259473", "0.54221565", "0.54135233", "0.5413173", "0.54126674", "0.540564", "0.5404631", "0.54030395", "0.5401281", "0.54004496", "0.5395966", "0.53879267", "0.53876853", "0.538517", "0.5384639", "0.53845686", "0.5380503", "0.5380503", "0.5378033", "0.53712326", "0.5370173", "0.5364084", "0.53592443", "0.534924", "0.5326165", "0.53223246", "0.53223246", "0.53199434", "0.5317523", "0.53164506", "0.5315582", "0.5315224" ]
0.59637266
9
Test with two- and three-card petitions
def test_two_and_three_card_petitions(self):
    f = gtrutils.check_petition_combos

    self.assertTrue( f( 0, 0, [], True, True))

    self.assertFalse( f( 1, 0, [], True, True))
    self.assertFalse( f( 1, 0, [1], True, True))
    self.assertTrue( f( 1, 0, [2], True, True))
    self.assertTrue( f( 1, 0, [3], True, True))
    self.assertFalse( f( 1, 0, [4], True, True))
    self.assertTrue( f( 1, 1, [], True, True))
    self.assertTrue( f( 1, 2, [], True, True))
    self.assertTrue( f( 1, 3, [], True, True))
    self.assertFalse( f( 1, 4, [], True, True))

    self.assertFalse( f( 1, 1, [2], True, True))
    self.assertFalse( f( 1, 1, [3], True, True))
    self.assertFalse( f( 1, 2, [2], True, True))
    self.assertFalse( f( 1, 3, [2], True, True))
    self.assertFalse( f( 1, 3, [3], True, True))

    self.assertTrue( f( 2, 1, [2], True, True))
    self.assertTrue( f( 2, 1, [3], True, True))
    self.assertTrue( f( 2, 0, [4], True, True))
    self.assertTrue( f( 2, 0, [5], True, True))
    self.assertTrue( f( 2, 0, [6], True, True))
    self.assertTrue( f( 2, 4, [], True, True))
    self.assertTrue( f( 2, 5, [], True, True))
    self.assertTrue( f( 2, 6, [], True, True))

    self.assertTrue( f(13, 26, [], True, True))
    self.assertTrue( f(13, 39, [], True, True))
    self.assertTrue( f(13, 0, [26], True, True))
    self.assertTrue( f(13, 14, [12], True, True))
    self.assertTrue( f(13, 13, [10], True, True))
    self.assertTrue( f(13, 15, [11], True, True))
    self.assertFalse( f(13, 40, [], True, True))
    self.assertFalse( f(13, 11, [3], True, True))

    self.assertFalse( f(4, 1, [2,3,6], True, True))
    self.assertTrue( f(5, 1, [2,3,6], True, True))
    self.assertTrue( f(6, 1, [2,3,6], True, True))
    self.assertFalse( f(7, 1, [2,3,6], True, True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_only_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], False, True))\n\n self.assertFalse( f( 1, 0, [0], False, True))\n self.assertTrue( f( 1, 1, [0], False, True))\n self.assertTrue( f( 1, 0, [3], False, True))\n self.assertTrue( f( 1, 3, [0], False, True))\n\n self.assertFalse( f( 1, 1, [2], False, True))\n self.assertFalse( f( 1, 1, [3], False, True))\n self.assertFalse( f( 1, 1, [4], False, True))\n\n self.assertTrue( f( 2, 2, [0], False, True))\n self.assertTrue( f( 2, 1, [3], False, True))\n self.assertTrue( f( 2, 3, [3], False, True))\n self.assertTrue( f( 2, 6, [0], False, True))\n self.assertTrue( f( 2, 0, [6], False, True))\n self.assertFalse( f( 2, 4, [3], False, True))\n\n self.assertFalse( f( 3, 1, [], False, True))\n self.assertFalse( f( 3, 2, [], False, True))\n self.assertFalse( f( 3, 0, [3], False, True))\n self.assertFalse( f( 3, 0, [6], False, True))\n self.assertTrue( f( 3, 3, [], False, True))\n self.assertTrue( f( 3, 2, [3], False, True))\n self.assertTrue( f( 3, 3, [6], False, True))\n self.assertTrue( f( 3, 1, [6], False, True))\n self.assertTrue( f( 3, 0, [9], False, True))\n\n self.assertTrue( f(13,13, [], False, True))\n self.assertTrue( f(13,39, [], False, True))\n self.assertTrue( f(13, 0, [39], False, True))\n self.assertTrue( f(13,15, [24], False, True))\n self.assertTrue( f(13,15, [], False, True))\n self.assertTrue( f(13,12, [3], False, True))\n self.assertFalse( f(13,14, [], False, True))\n\n self.assertFalse( f( 6, 1, [3,6,9], False, True))\n self.assertTrue( f( 7, 1, [3,6,9], False, True))\n self.assertFalse( f( 8, 1, [3,6,9], False, True))", "def test_only_two_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], True, False))\n\n self.assertFalse( f( 1, 0, [], True, False))\n self.assertFalse( f( 1, 0, [1], True, False))\n self.assertTrue( f( 1, 0, [2], True, False))\n self.assertFalse( f( 1, 0, [3], True, False))\n self.assertFalse( f( 1, 0, [4], True, False))\n\n self.assertTrue( f( 1, 1, [], True, False))\n self.assertFalse( f( 1, 1, [2], True, False))\n\n self.assertFalse( f( 2, 0, [2], True, False))\n self.assertFalse( f( 2, 0, [3], True, False))\n self.assertTrue( f( 2, 0, [4], True, False))\n self.assertFalse( f( 2, 0, [5], True, False))\n \n self.assertTrue( f( 2, 1, [2], True, False))\n self.assertFalse( f( 2, 1, [3], True, False))\n self.assertFalse( f( 2, 1, [4], True, False))\n\n self.assertTrue( f(13, 26, [], True, False))\n self.assertTrue( f(13, 0, [26], True, False))\n self.assertTrue( f(13, 14, [12], True, False))\n self.assertTrue( f(13, 13, [10], True, False))\n self.assertFalse( f(13, 15, [11], True, False))\n\n self.assertFalse( f( 6, 1, [2,4,6], True, False))\n self.assertTrue( f( 7, 1, [2,4,6], True, False))\n self.assertFalse( f( 8, 1, [2,4,6], True, False))", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def test_mano_3():\n g = get_card_by_description\n r = Round([\n [g(\"3 de Oro\"), g(\"5 de Oro\"), g(\"6 de Oro\")],\n [g(\"3 de Espada\"), g(\"5 de Basto\"), g(\"6 de Oro\")],\n ])\n assert r.result() == 1", "def differentiate_cards(card):\n\t\tdef High_Card(numbers,colors):\n\t\t\treturn len(set(numbers)) == 5\n\t\tdef One_Pair(numbers,colors):\n\t\t\treturn 
len(set(numbers)) == 4\n\t\tdef Two_Pairs(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\treturn [numbers.count(i) for i in numbers].count(2) == 4\n\t\tdef Three_of_a_Kind(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\tfor i in numbers:\n\t\t\t\tif numbers.count(i) == 3:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight(numbers,colors):\n\t\t\tfor i in xrange(1,len(numbers)):\n\t\t\t\tif numbers[i] - numbers[i-1] != 1:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\tdef Flush(numbers,colors):\n\t\t\treturn len(set(colors)) == 1\n\t\tdef Full_House(numbers,colors):\n\t\t\tnumbers_set = set(numbers)\n\t\t\tif len(numbers_set) != 2:\n\t\t\t\treturn False\n\t\t\ta = numbers[0]\n\t\t\tb= [x for x in numbers if x != a][0]\n\t\t\treturn (numbers.count(a) == 2 and numbers.count(b) == 3) or\\\n\t\t\t\t(numbers.count(a) == 3 and numbers.count(b) == 2)\n\t\tdef Four_of_a_Kind(numbers,colors):\n\t\t\tfor i in set(numbers):\n\t\t\t\tif numbers.count(i) == 4:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight_Flush(numbers,colors):\n\t\t\treturn Straight(numbers,colors) and Flush(numbers,colors)\n\t\tdef Royal_Flush(numbers,colors):\n\t\t\tRoyal = [10,11,12,13,14]\n\t\t\treturn numbers == Royal and Flush(numbers,colors)\n\n\t\tcards = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,\n\t\t 'T':10,'t':10,'J':11,'j':11,'Q':12,'q':12,'K':13,'k':13,'A':14,'a':14}\n\t\tnumbers = [cards[i[0]] for i in card]\n\t\tnumbers.sort()\n\t\tcolors = [i[1] for i in card]\n\t\t\n\t\tif Royal_Flush(numbers,colors):return 9\n\t\telif Straight_Flush(numbers,colors):return 8\n\t\telif Four_of_a_Kind(numbers,colors):return 7\n\t\telif Full_House(numbers,colors):return 6\n\t\telif Flush(numbers,colors):return 5\n\t\telif Straight(numbers,colors):return 4\n\t\telif Three_of_a_Kind(numbers,colors):return 3\n\t\telif Two_Pairs(numbers,colors):return 2\n\t\telif One_Pair(numbers,colors):return 1\n\t\telif High_Card(numbers,colors):return 0", "def test():\n sf = \"6C 7C 8C 9C TC\".split() # Straight Flush\n sf1 = \"6C 7C 8C 9C TC\".split() # Straight Flush\n sf2 = \"6D 7D 8D 9D TD\".split() # Straight Flush\n fk = \"9D 9H 9S 9C 7D\".split() # Four of a Kind\n fk3 = \"TC TS TH 2C TD\".split() # Four of a Kind\n fh = \"TD TC TH 7C 7D\".split() # Full House\n fl = \"AH KH JH 6H TH\".split() # Flush\n st = \"AH KC QD JD TS\".split() # Straight\n tk = \"2H 2C 2D AC TD\".split() # Three of kind\n tp = \"TD 9H TH 7C 9S\".split() # Two Pair\n op = \"TD TC AD KD QD\".split() # One Pair\n hq = \"2D 3D 4C 5H 7H\".split() # High card\n al = \"AC 2D 4H 3D 5S\".split() # Ace-Low Straight\n tp1 = \"7H 7D 9C 3C 9S\".split() #Two Pair\n fkranks = card_ranks(fk)\n tpranks = card_ranks(tp)\n op1 = \"KH 7C 5S KS 2S\".split() # One pair\n tp2 = \"TH 3S 2H 3D TC\".split() # Two pair\n tk1 = \"TH JD JH 8C JC\".split() # Three of kind\n hq1 = \"TH 9D 5C 3H 2C\".split() # High card\n f3 = \"2C 4C 6C 7C TC\".split() # Flush\n s3 = \"3C 4D 5H 6D 7H\".split() # Straight\n assert poker([fk3, f3, s3]) == fk3 #gilje start\n assert poker([sf, 20*fk]) == sf\n assert poker([fk3, 5*f3]) == fk3\n assert card_ranks(fk3) == [10, 10, 10, 10, 2]\n assert card_ranks(f3) == [10, 7, 6, 4, 2]\n assert hand_rank(fk3) == (7, 10, 2)\n assert hand_rank(f3) == (5, [10, 7, 6, 4, 2])\n assert flush(f3) == True\n assert straight(card_ranks(s3)) == True\n assert straight(card_ranks(f3)) == False #gilje slutt\n assert poker([fh, tk, hq]) == fh #oistein start\n assert poker([fl, sf1, tk]) == 
sf1\n assert poker([op, al, fh]) == fh\n assert poker([st, fk, tp]) == fk\n assert poker([tk, tp, op]) == tk\n assert poker([hq, op, hq]) == op\n assert card_ranks(op1) == [13, 13, 7, 5, 2]\n assert card_ranks(tp2) == [10, 10, 3, 3, 2]\n assert card_ranks(tk1) == [11, 11, 11, 10, 8]\n assert card_ranks(hq1) == [10, 9, 5, 3, 2] #oistein slutt\n assert poker([hq, tp, op]) == tp#steffen start\n assert poker([al, st]) == st\n assert poker([al, st, fl]) == fl\n assert card_ranks(hq) == [7, 5, 4, 3, 2]\n assert card_ranks(fk) == [9, 9, 9, 9, 7]\n assert card_ranks(fh) == [10, 10, 10, 7, 7]#steffen slutt\n assert poker([sf2, tk, al]) == sf2#arild start\n assert poker([hq, st]) == st\n assert poker([al, st, fk]) == fk\n assert flush(fl) == True\n assert straight(card_ranks(tp)) == False\n assert card_ranks(fk) == [9, 9, 9, 9, 7]\n assert card_ranks(hq) == [7, 5, 4, 3, 2]\n assert hand_rank(tk) == (3, 2, [14, 10, 2, 2, 2])\n assert hand_rank(st) == (4, 14)\n assert kind(5, tpranks) == None#arild slutt\n assert poker([tp, op]) == tp #Even start\n assert poker([hq, tk]) == tk\n assert poker([sf1] + 50*[fl]) == sf1\n assert card_ranks(sf1) == [10, 9, 8, 7, 6]\n assert card_ranks(tk) == [14, 10, 2, 2, 2]\n assert card_ranks(st) == [14, 13, 12, 11, 10]\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, tpranks) == 10\n assert kind(1, fkranks) == 7 #Even slutt\n assert poker([sf1, fk, fh]) == sf1\n assert poker([fk, fh]) == fk\n assert poker([fh, fh]) == [fh, fh]\n assert poker([sf1]) == sf1\n assert poker([sf1] + 99*[fh]) == sf1\n assert hand_rank(sf1) == (8, 10)\n assert hand_rank(fk) == (7, 9, 7)\n assert hand_rank(fh) == (6, 10, 7)\n assert straight(card_ranks(al)) == True\n assert poker([sf1, sf2, fk, fh]) == [sf1, sf2]\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, fkranks) == None\n assert kind(1, fkranks) == 7\n return 'You did good, and you should feel good about yourself :)'", "def test_numcards_is_two(self):\n self.assertEqual(self.hand.numCards, 5)", "def test_hand_has_three_of_a_kind(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_three_of_a_kind() == expected", "def test_numcards_is_two(self):\n self.assertEqual(self.hand.numCards, 2)", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def test_cards_get(self):\n pass", "def test_hand_has_two_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_two_pair() == expected", "def test_seven_cards_poker(self):\n self.assertEqual(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()),\n ('6C', '7C', '8C', '9C', 'TC'))\n self.assertEqual(best_hand(\"TD TC TH 7C 7D 8C 8S\".split()),\n ('TD', 'TC', 'TH', '8C', '8S'))\n self.assertEqual(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()),\n ('JD', '7C', '7D', '7S', '7H'))", "def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None", 
"def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def check_cards(self, cards):\n if len(cards) != 3:\n return False\n\n match = 0\n card1 = cards[0][1]\n card2 = cards[1][1]\n card3 = cards[2][1]\n\n match += self.compare_element(card1, card2, card3, 'shape')\n match += self.compare_element(card1, card2, card3, 'colour')\n match += self.compare_element(card1, card2, card3, 'count')\n match += self.compare_element(card1, card2, card3, 'fill')\n\n return match == 4", "def test_mano_4():\n g = get_card_by_description\n r = Round([\n [g(\"3 de Oro\"), g(\"5 de Oro\"), g(\"6 de Oro\")],\n [g(\"3 de Espada\"), g(\"5 de Basto\"), g(\"7 de Oro\")],\n ])\n assert r.result() == -1", "def test_play(self):\n self.plr.piles[Piles.DECK].set(\"Province\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.test_input = [\"keep\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.assertIn(\"Province\", self.plr.piles[Piles.DECK])\n self.assertNotIn(\"Province\", self.plr.piles[Piles.DISCARD])", "def test_value_soft_hand_two_aces(self):\n hand = self._hand\n cards = [BjCard('spades', '2'), BjCard('hearts', 'A'), BjCard('clubs', '5'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 19)", "def test_strategy(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C])\n self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])\n self.responses_test([C] * 11, [C] * 10 + [D], [C])", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def is_ok_three_lines(line1, line2, line3):\n card1 = line1[0]\n card2 = line1[1]\n card3 = line1[2]\n card4 = line2[0]\n card5 = line2[1]\n card6 = line2[2]\n\n card7 = line3[0]\n card8 = line3[1]\n card9 = line3[2]\n idents1 = [card.ident for card in line1]\n idents2 = [card.ident for card in line2]\n idents3 = [card.ident for card in line3]\n\n intersection = list(set(idents1) & set(idents2))\n if intersection:\n dprint(\"intersection 12\")\n return False\n\n intersection = list(set(idents1) & set(idents3))\n if intersection:\n return False\n\n intersection = list(set(idents2) & set(idents3))\n if intersection:\n return False\n\n print(\"??????????????\")\n show_triple(line1, line2, line3)\n print(\"??????????????\")\n\n if not is_ok_two_lines(line1, line2):\n return False\n if not is_ok_two_lines(line2, line3):\n return False\n\n return True", "def test_poker_two_sf(self):\n self.assertEqual(\n poker([self.sf1, self.sf2, self.fk, self.fh]),\n [self.sf1, self.sf2])", "def test_create_recipe_card(self):\n pass", "def test_value_hard_hand_two_aces(self):\n hand = self._hand\n cards = [BjCard('spades', '6'), BjCard('hearts', 'A'), BjCard('clubs', 'K'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 18)", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = 
[\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_three_arms_two_winners(self):\n self._test_three_arms_two_winners()", "def test_card_suit(mock_card):\n assert mock_card.suit == Suit.SPADE", "def test_for_dealing_card():\n deck1 = Shoe()\n deck1.deal_card()\n assert len(deck1.deck) == 51", "def test_play(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.favors.get(), 1)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1 + 1)", "def test_four_kind(self):\n self.assertEqual(poker([self.fk, self.fh]), [self.fk])", "def simulate(deck): \n \n # Initialize Banker and Player\n # player_third_card is initialized to -10 to signify that it doesn't exist.\n banker = 0\n player = 0\n player_third_card = -10\n \n# Deal out two hands of two cards\n player = (player + deck.pop()) % 10\n player = (player + deck.pop()) % 10\n \n banker = (banker + deck.pop()) % 10\n banker = (banker + deck.pop()) % 10\n \n# Check for natural\n if player >= 8 and banker >= 8:\n return 'tie'\n elif banker >= 8:\n return 'banker'\n elif player >= 8:\n return 'player'\n \n\n# Run through Player hand\n if player <= 5:\n player_third_card = deck.pop()\n player = (player + player_third_card) % 10\n \n\n# Run through Banker hand\n if player_third_card == -10 and banker < 6:\n banker = (banker + deck.pop()) % 10\n elif banker <= 2:\n banker = (banker + deck.pop()) % 10\n elif banker == 3 and player_third_card != 8:\n banker = (banker + deck.pop()) % 10\n elif banker == 4 and player_third_card >= 2 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 5 and player_third_card >= 4 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 6 and (player_third_card == 6 or player_third_card == 7):\n banker = (banker + deck.pop()) % 10\n \n \n# Compare hands and return results\n if player > banker:\n return 'player'\n elif banker > player:\n return 'banker'\n else:\n return 'tie'", "def test_poker_one_sf(self):\n self.assertEqual(poker([self.sf1, self.fk, self.fh]), [self.sf1])", "def resolve_card(board, eng_type, scot_type, card, role, parameter, truce = False):\n\n if role == 'ENGLAND':\n which_side = eng_type\n elif role == 'SCOTLAND':\n which_side = scot_type\n\n\n if card == '1':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '2':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '3':\n movement_execution(board, which_side, role, int(card), truce)\n\n else:\n\n if role == 'ENGLAND' or not scottish_king.run_king(board, eng_type, scot_type):\n \n \n \n if card == 'SEA':\n \n if play_pass(which_side) == 'play':\n sea_execution(board, which_side, role)\n \n \n elif card == 'HER':\n \n if play_pass(which_side) == 'play':\n her_execution(board, which_side, role, eng_type, scot_type)\n \n \n elif card == 'VIC':\n if play_pass(which_side) == 'play':\n vic_execution(board, which_side, role, parameter)\n \n \n elif card == 'PIL':\n \n if play_pass(which_side) == 'play':\n pil_execution(board, which_side, role, parameter)\n \n \n elif card == 'TRU':\n \n if play_pass(which_side) == 'play':\n return True", "def test_hand_has_one_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert 
hand.has_one_pair() == expected", "def checkDoubles(self,card): # need to check defenders handcount...\n multipleCards = [card]\n for i in range(4): # checking all other possible cards of same rank\n card_plus = card + 13 * i # checking higher values\n card_minus = card - 13 * i # checking lower values\n if card_plus in self.currentHand and card_plus < 51 and card_plus != card and card_plus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt= input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_plus)\n self.currentHand.remove(card_plus)\n else:\n print(\"Did not add\")\n if card_minus in self.currentHand and card_minus > 0 and card_plus != card and card_minus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_minus)\n self.currentHand.remove(card_minus)\n else:\n print(\"Did not add\")\n return multipleCards", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def test_two_full_house(self):\n self.assertEqual(poker([self.fh, self.fh]), [self.fh, self.fh])", "def test_create_card(self):\n data = {\n 'first_name': 'Ty',\n 'last_name': 'Cobb',\n 'variety': 'green portrait'\n }\n resp = self.app.post('cards', json=data)\n\n assert resp.status_code == 200\n\n assert data['first_name'] == resp.json['first_name']\n assert data['last_name'] == resp.json['last_name']\n assert data['variety'] == resp.json['variety']", "def check_valid(self, cards):\n\n if len(cards) == 1: # one card\n return True\n if len(cards) == 2: # two cards\n if ((self.num_to_card(int(cards[0])) == self.num_to_card(int(cards[1]))) or # two same cards\n (int(cards[0]) > 51) or # any card and a joker\n (int(cards[1])) > 51): # any card and a joker\n return True\n return False\n\n # 3 or more: all same number/ascending order\n # check how many jokers\n jokers = 0\n for card in cards:\n #print(int(card))\n #print(self.num_to_card(card))\n if int(card) > 51:\n jokers += 1\n #print(\"YESSSSSSSSSSIR\")\n #print(f'[THERE ARE {jokers} JOKERS]')\n\n # check if all same number\n sort = sorted(cards)\n #print(f'[THE SORTED CARDS: {sort}]')\n index = 0\n for card in sort:\n if self.num_to_card(int(card)) == self.num_to_card(int(sort[0])) or int(card) > 51:\n index += 1\n if index == len(cards):\n return True\n\n # check ascend order\n if not self.is_same_sign(cards):\n print('Here')\n return False\n\n #print(\"accend left\")\n return self.ascend(cards, jokers)", "def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)", "def test_cards_put(self):\n pass", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def test_init(self):\n self.assertEqual(self.card.suit, \"Spades\")\n self.assertEqual(self.card.value, \"A\")", "def test_hand_is_four_of_a_kind(hand, card_list, expected):\n hand.add_cards(card_list)\n assert 
hand.is_four_of_a_kind() == expected", "def test_vp_mark2_complex(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.bob, 30) # Attack with 30 -> 8vp\n s2 = s1.react(self.alice, 15,\n troop_type=\"cavalry\") # Oppose with 30 -> 7vp\n s3 = s2.react(self.bob, 14) # Oppose with 14 -> 1vp\n s3.react(self.alice, 1) # Oppose with 1\n\n s4 = s1.react(self.dave, 10, hinder=False) # Support with 10 -> 10vp\n s4.react(self.carol, 15) # Oppose with 15\n\n result = s1.resolve()\n self.assertEqual(result.victor, self.bob.team)\n\n # 10 because the 1 VP for s3 counts now, and the 1 extra lowers the\n # number of troops bob opposes with, which increases the number of\n # troops in s2, which increases the VP it's worth.\n self.assertEqual(result.vp, 10)", "def test_hand_find_category(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.find_category() == expected", "def test_get_cards(self):\n card1 = CardFactory()\n self.session.commit()\n card2 = CardFactory()\n self.session.commit()\n resp = self.app.get('cards')\n data = resp.json\n\n assert resp.status_code == 200\n assert len(data) == 2\n\n assert data[0]['first_name'] == card1.first_name\n assert data[0]['last_name'] == card1.last_name\n assert data[0]['variety'] == card1.variety\n\n assert data[1]['first_name'] == card2.first_name\n assert data[1]['last_name'] == card2.last_name\n assert data[1]['variety'] == card2.variety", "def test_cards_get_list(self):\n pass", "def test_special_contacts(self):\n\n vcards = []\n\n # Generate a contact with no email address\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with the same name but different phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with no name\n current_contact = bt_contacts_utils.VCard()\n current_contact.email = \"{}@gmail.com\".format(\n bt_contacts_utils.generate_random_string())\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with random characters in its name\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = bt_contacts_utils.generate_random_string()\n current_contact.last_name = bt_contacts_utils.generate_random_string()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n bt_contacts_utils.create_new_contacts_vcf_from_vcards(\n self.contacts_destination_path, PSE_CONTACTS_FILE, vcards)\n\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n\n return 
self.connect_and_verify(phone_numbers_added)", "def mock_card():\n return Card(Suit.SPADE, 1)", "def test_triple_pile_driver(self):\n self.validate_goal_for('game-20121231-142658-fc8f047a.html',\n u'dominion cartel',\n 'TriplePileDriver')", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def test_affect_of_strategy(self):\n self.responses_test([C, C, C], [C, C, C], [C, C, C])\n # Make sure that the retaliations are increasing\n # Retaliate once and forgive\n self.responses_test([C], [D], [D])\n self.responses_test([C, D], [D, C], [C])\n self.responses_test([C, D, C], [D, C, C], [C])\n # Retaliate twice and forgive\n self.responses_test([C, D, C], [D, C, D], [D, D])\n self.responses_test([C, D, C, D, D], [D, C, D, C, C], [C])\n # Opponent defection during retaliation doesn't increase retaliation period\n self.responses_test([C, D, C, D, D], [D, C, D, D, C], [C])\n # Retaliate thrice and forgive\n self.responses_test([C, D, C, D, D, C], [D, C, D, C, C, D], [D, D, D])\n history_1 = [C, D, C, D, D, C, D, D, D]\n history_2 = [D, C, D, C, C, D, C, C, C]\n self.responses_test(history_1, history_2, [C])", "def test_play_card(self):\n while True:\n card = self.g[\"Clashes\"].remove()\n if card.name == \"Battle Plan\":\n break\n self.plr.piles[Piles.DECK].set(\"Gold\")\n self.plr.piles[Piles.HAND].set(\"Estate\", \"Militia\")\n self.plr.add_card(card, Piles.HAND)\n self.plr.test_input = [\"Reveal Militia\", \"Rotate Clashes\"]\n self.plr.play_card(card)\n self.assertIn(\"Gold\", self.plr.piles[Piles.HAND])\n next_card = self.g[\"Clashes\"].remove()\n self.assertEqual(next_card.name, \"Archer\")", "def test_card_info_lookup(self):\n pass", "def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)", "def test_for_splittable_hand_with_aces(self):\n hand = self._hand\n cards = [BjCard('clubs', 'A'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, True)", "def third_street ():\r\n global all_hands\r\n global deck\r\n global players\r\n #Set of all cards for third street draw \r\n third_street_draws = random.sample(deck, len(players)*3)\r\n #Remove drawn cards from deck\r\n for card in third_street_draws:\r\n deck.remove(card)\r\n #Deal 1 Card Each Player Until 3, then reveal third street.\r\n for player in players:\r\n hand = []\r\n for i in range(0,3):\r\n hand.append(third_street_draws[player+len(players)*i])\r\n all_hands.append(hand)\r\n if player == you:\r\n print(\"Your hand is: \", str(all_hands[you]))\r\n else:\r\n print(\"Player \", str(player+1), \"'s 3rd Street hand is: \", str(hand[2]))", "def test_strategy(self):\n self.first_play_test(C)", "def test_deal_insufficient_cards(self):\n cards = self.deck._deal(100)\n 
self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)", "def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)", "def test_handcrafted_examples(self):\n for i in range(1000):\n self.assertEqual(perfectd(0), True)\n self.assertEqual(prime(0), False)\n self.assertEqual(prime(2), True)\n self.assertEqual(prime(7), True)\n self.assertEqual(prime(15), False)\n self.assertEqual(perfectd(6), True)\n self.assertEqual(perfectd(15), False)", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "def test_for_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, True)", "def test_get_scorable_3pair():\n roll = np.array([1, 1, 5, 5, 2, 2])\n expected = {\n \"one\": 2,\n \"five\": 2,\n \"three-ones\": False,\n \"three-twos\": False,\n \"three-threes\": False,\n \"three-fours\": False,\n \"three-fives\": False,\n \"three-sixes\": False,\n \"four-of-a-kind\": False,\n \"three-and-one\": False,\n \"five-of-a-kind\": False,\n \"six-of-a-kind\": False,\n \"straight\": False,\n \"three-pairs\": True,\n \"four-and-pair\": False,\n \"triplets\": False,\n }\n actual = analyze_roll.get_scorable(roll)\n assert expected == actual", "def test_deck_validation(self):\n \tpass", "def hit(player):\n deal_random_card(player)", "def compare_element(self, card1, card2, card3, element):\n e1 = card1[element]\n e2 = card2[element]\n e3 = card3[element]\n if (e1 == e2 and e2 == e3) or (e1 != e2 and e1 != e3 and e2 != e3):\n # All the same or all different.\n return 1\n return 0", "def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def test_when_opponent_all_Ds(self):\n self.responses_test([C, C, C, C], [D, D, D, D], [D, D, D], random_seed=5)", "def card_factory(rank,suit):\n pass", "def compare_cards(board, eng_card, scot_card, eng_type, scot_type, eng_parameter, scot_parameter):\n\n\n \n year_ends_early = False\n\n \n if get_card_val(eng_card) > get_card_val(scot_card):\n who_goes_first = 'ENGLAND'\n \n elif get_card_val(eng_card) < get_card_val(scot_card):\n who_goes_first = 'SCOTLAND'\n \n elif get_card_val(eng_card) == get_card_val(scot_card):\n \n who_goes_first = 'ENGLAND'\n \n if get_card_val(eng_card) == 4 and get_card_val(scot_card) == 4:\n year_ends_early = True\n \n board.who_goes_first = who_goes_first\n\n eng_played_truce = False\n if eng_card == 'TRU':\n eng_played_truce = True\n\n scot_played_truce = False\n if scot_card == 'TRU':\n scot_played_truce = True\n\n if who_goes_first == 'ENGLAND':\n\n resolve_card(board, eng_type, 
scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n \n elif who_goes_first == 'SCOTLAND':\n \n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n \n return who_goes_first, year_ends_early", "def war_tie(cls, card1, card2):\n print(\"------------------------------------------------\")\n print(\"Tie!!\")\n print(f\"{card1.show()} is equal to {card2.show()}\")\n print(\"------------------------------------------------\")", "def test_create_card_missing_variety(self): # pylint: disable=invalid-name\n data = {\n 'first_name': 'Ty',\n 'last_name': 'Cobb',\n }\n resp = self.app.post('cards', json=data)\n\n assert resp.status_code == 200\n\n assert data['first_name'] == resp.json['first_name']\n assert data['last_name'] == resp.json['last_name']\n assert resp.json['variety'] is None", "def test_get_war_result_tie(self):\n five = Card.objects.create(suit=Card.CLUB, rank=\"five\")\n five2 = Card.objects.create(suit=Card.HEART, rank=\"five\")\n self.assertEqual(five.get_war_result(five2), 0)", "def check_card(card1, card2):\r\n\r\n num1 = card1.split(' ')[0]\r\n num2 = card2.split(' ')[0]\r\n\r\n if num1 == num2:\r\n return True\r\n else:\r\n return False", "def show_triple(line1, line2, line3):\n card1 = line1[0]\n card2 = line1[1]\n card3 = line1[2]\n card4 = line2[0]\n card5 = line2[1]\n card6 = line2[2]\n card7 = line3[0]\n card8 = line3[1]\n card9 = line3[2]\n print(card1.ident, card2.ident, card3.ident)\n print(card4.ident, card5.ident, card6.ident)\n print(card7.ident, card8.ident, card9.ident)\n print(card1, card2, card3)\n print(card4, card5, card6)\n print(card7, card8, card9)", "def CARD_SUITS() -> tuple:\n return \"Diamonds\", \"Hearts\", \"Clubs\", \"Spades\"", "def test_vp_mark2(self):\n # Test of the VP system as outlined at http://redd.it/2k96il\n battle = self.battle\n s1 = battle.create_skirmish(self.bob, 30) # Attack with 30 -> 8vp\n s2 = s1.react(self.alice, 15,\n troop_type=\"cavalry\") # Oppose with 30 -> 7vp\n s2.react(self.bob, 14) # Oppose with 14\n\n result = s1.resolve()\n self.assertEqual(result.victor, self.bob.team)\n # Old way adds up VP, make sure that's not happening\n self.assertNotEqual(result.vp, 22)\n\n # New way only adds up VP for winning side\n # (8vp because the 15 in s2 was reduced to 8)\n self.assertEqual(result.vp, 8)\n self.assertEqual(result.vp, result.vp_for_team(self.bob.team))\n\n # What if the other side had won?\n self.assertEqual(result.vp_for_team(self.alice.team), 14)", "def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n 
expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])", "def test_for_splittable_hand_with_ten_value_cards(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'K')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, True)", "def switch3():\n print(f\"Your credit card number is: {id_class.credit_card}\")\n main()", "def play_set(pl1, pl2, start):\n for plyr in pl1, pl2:\n print \"Cards of \", plyr.name, \" are :\"\n for this_card in plyr.hand:\n print this_card.num, this_card.suit \n \n pl1.score += 1", "def test_int():\n cards= \"5H 5C 6S 7S KD 2C 3S 8S 8D TD\".split()\n h1, h2 = to_hand(cards[:5]), to_hand(cards[5:])\n s_h1, s_h2 = score_int(h1), score_int(h2)\n assert s_h1 == 10000+100*(5)+(13)\n assert s_h2 == 10000+100*(8)+(10)\n assert s_h1 < s_h2\n\n cards= \"5D 8C 9S JS AC\t 2C 5C 7D 8S QH\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n assert s_h1 == 100*(14)\n assert s_h2 == 100*(12)\n assert s_h1 > s_h2\n\n cards= \"2D 9C AS AH AC 3D 6D 7D TD QD\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n #print( h1, \"=\", s_h1, \":\", h2, \"=\", s_h2 )\n assert s_h1 == 30000+100*(14)\n assert s_h2 == 50000+100*(12)\n assert s_h1 < s_h2\n\n cards= \"4D 6S 9H QH QC 3D 6D 7H QD QS\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n assert s_h1 == 10000+100*(12)+(9)\n assert s_h2 == 10000+100*(12)+(7)\n assert s_h1 > s_h2\n\n cards= \"2H 2D 4C 4D 4S 3C 3D 3S 9S 9D\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n assert s_h1 == 60000+100*(4)+(2)\n assert s_h2 == 60000+100*(3)+(9)\n assert s_h1 > s_h2", "def test_creature(self):\n self.assertEqual(len(self.processor), 3)", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4", "def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##", "def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##", "def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##", "def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess", "def test_check_three_of_a_kind_true(self):\n three_of_a_kind_fixtures = [[1, 1, 1, 1, 1],\n [1, 1, 1, 1, 2],\n [1, 1, 1, 2, 2],\n [2, 1, 1, 1, 2],\n [2, 2, 1, 1, 1],\n [2, 1, 1, 1, 1],\n ]\n\n for fixture in three_of_a_kind_fixtures:\n score = self.roll.check_three_of_a_kind(fixture)\n\n self.assertEqual(score, sum(fixture))\n 
self.assertEqual(len(fixture), 5)", "def test_two_game(self):\n self.choice.side_effect = [\"ant\", \"baboon\"]\n self.input.side_effect = list(\"ant\" \"y\" \"babon\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')\n self.xprint.assert_any_call('Yes! The secret word is \"baboon\"! '\n 'You have won!')", "def p2_check(suit, mod):\n if mod > 0:\n raise ValueError('die')\n\n c = cards[:]\n flips = abs(mod) + 1\n results = []\n for i in range(flips):\n random.shuffle(c)\n results.append(c.pop())\n\n score = 100\n used = None\n for card in results:\n if card[suit] < score:\n score = card[suit]\n used = card\n\n if used['Pro']:\n return 1\n return 0", "def test_highcard_properties(self):\n self.assertEqual(self.hand.pair1Rank, 0)\n self.assertEqual(self.hand.highCard, 14)\n self.assertEqual(self.hand.postHandType, 10)\n self.assertEqual(self.hand.postHandValue, 247)" ]
[ "0.7009317", "0.6996275", "0.65862507", "0.6539536", "0.65000474", "0.64990026", "0.64953685", "0.6476889", "0.64768463", "0.6467368", "0.6336645", "0.6292399", "0.6257715", "0.62031925", "0.61506057", "0.6122968", "0.60999656", "0.6068418", "0.6024874", "0.6014065", "0.6011133", "0.59815776", "0.59764344", "0.5955293", "0.5946174", "0.593857", "0.5919217", "0.59104884", "0.58651847", "0.58564675", "0.5802106", "0.5801137", "0.57982665", "0.57980233", "0.57809937", "0.5778117", "0.5767368", "0.5760808", "0.57555836", "0.57392883", "0.573582", "0.5735715", "0.5735487", "0.5733445", "0.57327765", "0.57321733", "0.5726449", "0.57108015", "0.5701551", "0.56926006", "0.56882197", "0.56836283", "0.56792283", "0.5675622", "0.5648318", "0.5625959", "0.56059337", "0.55776036", "0.55705863", "0.5566107", "0.5565788", "0.5561849", "0.5541331", "0.5541331", "0.55410534", "0.552817", "0.5515292", "0.5513984", "0.5507166", "0.5504329", "0.5499777", "0.54976094", "0.549623", "0.54911923", "0.5482156", "0.5478493", "0.5477798", "0.54731214", "0.546968", "0.54685515", "0.5463151", "0.54538924", "0.5448257", "0.5448132", "0.5440995", "0.54403514", "0.543958", "0.543437", "0.5431599", "0.5429299", "0.5429137", "0.5429137", "0.5429137", "0.5429137", "0.5428218", "0.5421123", "0.5418961", "0.5417073", "0.54156816", "0.5413091" ]
0.7173055
0
Test limit at beginning of game.
def test_initial_limit(self): g = test_setup.simple_two_player() p1, p2 = g.players self.assertEqual(g._clientele_limit(p1), 2) self.assertEqual(g._clientele_limit(p2), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def limit(self, limit):\n self._evaluated = False\n self._limit = limit\n return self", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def STAND_LIMIT() -> int:\n return 15", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def range1000():\n \n global upper_limit\n upper_limit = 1000\n # button that changes the range to [0,1000) and starts a new game \n global secret_number\n secret_number = random.randrange(0,1000)\n new_game()", "def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def limit_reached(self):\n if len(self.selected) >= self.limit:\n return True\n return False", "def enough_players():\n return True", "def test_limit_is_negative(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=-5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])", "def is_limited(self) -> bool:\n return self.__times > ActionState.UNLIMITED", "def check_limit(limit):\n if limit:\n limit = int(limit)\n if limit > settings.MAX_LISTING_LIMIT or \\\n limit < settings.MIN_LISTING_LIMIT:\n # 
SuspiciousOperation raises 400 bad request in Django 1.11.\n # https://docs.djangoproject.com/en/1.11/ref/views/#the-400-bad-request-view\n raise SuspiciousOperation()\n return limit\n return settings.DEFAULT_LISTING_LIMIT", "def range100():\n \n global upper_limit\n upper_limit = 100\n # button that changes the range to [0,100) and starts a new game \n global secret_number\n secret_number = random.randrange(0,100)\n new_game()", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def start_new_game(word, max_tries):\n\n # replace the pass statement with your code\n pass", "def test_gallows_outside_bounds(self):\n with mock.patch(\"hangman.cli.screen.print\") as mock_print:\n for index in [-1, len(hangman.cli.screen._GALLOWS)]:\n hangman.cli.screen.Screen.gallows(index)\n mock_print.assert_called_with(hangman.cli.screen._GALLOWS[-1])", "def testAtLeastSetsLimit(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g = 6\n\t\tc.atLeast(2)\n\t\tc.replay()\n\t\tx.g = 6\n\t\tself.failUnlessRaises(Exception, c.verify)\n\t\tx.g = 6\n\t\tc.verify()", "def test_limit_0(self):\n self.Person.objects.create(name=\"User A\", age=20)\n\n # Test limit with 0 as parameter\n qs = self.Person.objects.limit(0)\n assert qs.count() == 0", "def limit(self, limit):\n self._limit = limit", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum", "def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None", "def on_limit(self, track):\n log.debug(\"Received limit notice: %d\", track)", "def adb_video_limit(given_limit):\n return given_limit", "async def cclimit(self, ctx, limit_amount: int = None):\n if limit_amount is None:\n return await ctx.send_help()\n if limit_amount < 0:\n return await ctx.send(\"You need to use a number larger than 0.\")\n await self.config.limit.set(limit_amount)\n await ctx.send(f\"Chatchart is now limited to {limit_amount} messages.\")", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. 
If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def check(self):\n self.__check_request_limit()", "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def limit(self, limit):\n\n # Return between 1 and 250 results, defaults to 10\n return max(1, min(250, int(limit) if limit else 10))", "def testEnsurePlaybacksAreUnlimited(self):\n\t\tpolicy = MinimumPlaybackPolicy()\n\t\tself.failUnless(policy.hasUnlimitedPlaybacks)", "def check_limit(redis_client):\n if redis_client.llen('query_counter') >= API_RATE_LIMIT:\n left_val = redis_client.lpop('query_counter')\n parsed_left_val = float(left_val.decode('utf-8'))\n current_api_window = (datetime.utcnow() - timedelta(minutes=API_WINDOW_PERIOD)).timestamp()\n if parsed_left_val > current_api_window:\n redis_client.lpush('query_counter', left_val)\n return False\n return True", "def le(value, limit):\n return value <= limit", "def main():\r\n global user_pick, pickno, total\r\n test_total()\r\n sleep(delay)\r\n print(\"It is your turn!\")\r\n pickno = int(4)\r\n #Repeats the process as many times as we need\r\n while total >= 4:\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain\r\n test_pick()\r\n remain()\r\n cmp_logic()\r\n sleep(delay)\r\n print(\"You should pick \" + str(total))\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain()\r\n test_pick()\r\n remain()\r\n # Only way that USER WINS!!\r\n if int(total) == 0:\r\n sleep(delay)\r\n print(\"User WINS!\")\r\n exit()", "def range1000():\n global r100, r1000, turn_count\n r1000 = True\n new_game()", "def attempt_limit(self) -> int:\n return self._attempt_limit", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n\n self._limit = limit", "def test_gallows_within_bounds(self):\n with mock.patch(\"hangman.cli.screen.print\") as mock_print:\n for index in range(len(hangman.cli.screen._GALLOWS)):\n hangman.cli.screen.Screen.gallows(index)\n mock_print.assert_called_with(hangman.cli.screen._GALLOWS[index])", "def range1000():\n global range, guesses_made, guesses_remaining, correct_num, victory_condition\n\n range = 1000\n guesses_made = 0\n guesses_remaining = 10#calculate_remaining_guesses(range)\n correct_num = random.randrange(range)\n victory_condition = False\n\n print \"New Game! 
Guess between 1 and \", range\n print \"Remaining guesses: \", guesses_remaining", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])", "def check(self):\n logging.info(\"rate limit remaining %s\" % self.remaining)\n while self.remaining <= 1:\n now = time.time()\n logging.debug(\"rate limit < 1, now=%s and reset=%s\", now,\n self.reset)\n if self.reset and now < self.reset:\n # padded with 5 seconds just to be on the safe side\n secs = self.reset - now + 5\n logging.info(\"sleeping %s seconds for rate limiting\" % secs)\n time.sleep(secs)\n else:\n # sleep a second before checking again for new rate limit\n time.sleep(1)\n # get the latest limit\n self.ping()\n self.remaining -= 1", "def on_limit(self, track):\n print \"!!! Limitation notice received: %s\" % str(track)\n return", "def ge(value, limit):\n return value >= limit", "def perform_strategy(self, counter):\r\n if counter == 0: # reset. allows for multiple runs with the same instance\r\n self.maxes_counter = 0\r\n self.curr_max = -inf\r\n m = self.envelopes[counter].money\r\n if m >= self.curr_max:\r\n self.maxes_counter += 1\r\n self.curr_max = m\r\n return self.maxes_counter == self.N", "def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)", "def on_limit(self, track):\n print ('Got Rate limit Message', str(track))\n return True # Don't kill the stream", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def assign_leftLimit():\r\n player.rect.x = 25", "def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r", "def calculate(self, limit):\r\n pass", "def __init__(self, min_player_count):\n self.min_player_count = min_player_count", "def time_limit(self):\n return 2503", "def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass", "def set_limit(self, limit):\n self.limit = limit\n self._prune()", "def check_timelimit_slot__(self):\n timerange = self.valkkafs_manager.getTimeRange()\n \n if len(timerange) < 1: # empty tuple implies no frames\n print(\"PlaybackController: check_timelimit_slot__ : WARNING! 
no timerange from ValkkaFS\")\n # fabricate a dummy time : this exact moment\n current_time = int(time.time() * 1000)\n timerange = (\n current_time,\n current_time + 1\n )\n print(\"check_timelimits_slot__ : timerange =\", timerange)\n print(\"check_timelimits_slot__ : %s -> %s\" % ( formatMstimestamp(timerange[0]), formatMstimestamp(timerange[1]) ) )\n self.signals.set_fs_time_limits.emit(timerange)", "def on_limit(self, track):\n print(track)", "def assign_upLimit():\r\n player.rect.y = 25", "def test_get_speed_limit():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n assert get_speed_limit(center, radius, speed_limit) != center\n assert get_speed_limit(center, radius, speed_limit) != radius\n assert get_speed_limit(center, radius, speed_limit) == speed_limit", "def test_limit(db_session):\n query_params = {\"limit\": \"1\"}\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n offset_limit_info = parser.parse_offset_limit(page_max_size=30)\n offset = offset_limit_info.offset\n limit = offset_limit_info.limit\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts(),\n limit=limit,\n offset=offset\n )\n assert len(result) == 1", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. 
Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def evaluation_point(self):\n if self.turns in range(40,(self.game_length - 19), self.policy_eval_point):\n return True", "def testTooManyPlaybacksRaisesAnException(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tpolicy.playback()\n\t\tself.failUnlessRaises(RecordedCallsWereNotReplayedCorrectly, policy.playback)", "def test(self, num_test=1000):\n\n self.num_test = num_test\n self.player_wins = 0\n self.opponent_wins = 0\n self.optimal_wins = 0\n self.optimal_losses = 0\n\n self.game.restart()\n\n for test in range(num_test):\n self.game.deal_cards()\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='exploit')\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n (self.game.set_player_action(player_action)\n .set_opponent_action(opponent_action))\n player_score, opponent_score = self.game.get_scores()\n\n if player_score > opponent_score:\n self.player_wins += 1\n elif opponent_score > player_score:\n self.opponent_wins += 1\n\n optimal_result = self.game.get_optimal_result()\n if optimal_result > 0:\n self.optimal_wins += 1\n elif optimal_result < 0:\n self.optimal_losses += 1\n\n print(\"Testing done!\")", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def _check(self):\n try:\n num = int(self.ids.res_lim.text)\n # reset negative numbers to zero\n if num <= 0:\n self.ids.res_lim.text = str(0)\n except ValueError:\n self.ids.res_lim.text = str(self.limit)\n\n return int(self.ids.res_lim.text)", "def start_of_game(self):\n pass", "def limit(self, key):\n if self._debug:\n return False\n\n counter = self.database.List(self.name + ':' + key)\n n = len(counter)\n is_limited = False\n if n < self._limit:\n counter.prepend(str(time.time()))\n else:\n oldest = counter[-1]\n if (oldest is not None) and (time.time() - float(oldest) < self._per):\n is_limited = True\n else:\n counter.prepend(str(time.time()))\n del counter[:self._limit]\n counter.pexpire(int(self._per * 2000))\n return is_limited", "def test_limit_with_insula(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.insula, 'Rubble', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 4)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 7)", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n 
self.assertEqual(self.es._total, total)", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def _testRatingLimit(self):\n\n comment = models.Comment.objects.all()[0]\n type = models.RatingType.objects.all()[0]\n try:\n val = type.limit + 10\n rating = models.Rating(comment=comment, type=type, value=val)\n rating.save()\n assert rating.value == type.limit\n finally:\n rating.delete()", "def test_set_project_limits(self):\n pass", "def range100():\n global r100, r1000\n r1000 = False\n new_game()", "def startGame(self, lengthOfPattern, maxNumberOfTurns):\n self._currentTurnNum = 0\n self._lengthOfPattern = lengthOfPattern\n self._maxNumberOfTurns = maxNumberOfTurns", "def calculate(self, limit):\n pass", "def test_update_instance_limit(self):\n pass", "def _allowed_page(self, box):\n if self._max_page:\n return self._min_page <= box.page <= self._max_page\n else:\n return self._min_page <= box.page", "def test_resume_game(self):\r\n\r\n a_player_1 = RandomPlayer(1)\r\n a_player_2 = UserPlayer(2)\r\n a_player_2.set_choice(0)\r\n a_players = [a_player_1, a_player_2]\r\n a_x_dist = 5\r\n a_y_dist = 5\r\n a_num_to_win = 3\r\n a_game = Game(a_players,a_x_dist,a_y_dist,a_num_to_win)\r\n\r\n #game will pause\r\n a_game.play_game()\r\n\r\n while a_game.winner != -1:\r\n a_player_2.set_choice(0)\r\n a_game.resume_game()", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def setUp(self):\r\n # How many times a p-value should be tested to fall in a given range\r\n # before failing the test.\r\n self.p_val_tests = 10", "def set_limit(self, errors):\n self.limit = errors", "def testZeroPlaybacksIsSufficientlyPlayedBack(self):\n\t\tpolicy = MinimumPlaybackPolicy(0)\n\t\tself.failUnless(policy.hasBeenPlayedBack)", "def test_default_limit(self):\n telem = self.create_logs(self.user1, num=200)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(100, len(data))", "def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()", "async def max(self, ctx, limit: int):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"max\"] = limit\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Warn limit is now: \\n{}\".format(limit))", "def callback_max_wall_time_reached(self, event):\n self.perform_final_actions()\n self._max_wall_time_reached = True", "def testDefaultSettingOfOnePlayack(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)\n\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failUnless(policy.isReadyForRemoval)" ]
[ "0.6717742", "0.65904355", "0.64534426", "0.64433163", "0.643638", "0.6354526", "0.6331702", "0.6220812", "0.6176772", "0.61617315", "0.6129828", "0.60590106", "0.6015792", "0.5951251", "0.5916098", "0.5896707", "0.58848923", "0.5870503", "0.5835869", "0.5821406", "0.58180004", "0.5814037", "0.5797157", "0.5793415", "0.5787679", "0.57843226", "0.57660675", "0.5761766", "0.57575196", "0.5747896", "0.5736799", "0.5731105", "0.5730338", "0.5714348", "0.56733257", "0.56694835", "0.5655484", "0.5653742", "0.5649527", "0.56452274", "0.5644742", "0.5637151", "0.56346756", "0.56293124", "0.5623637", "0.56234634", "0.56234634", "0.56234634", "0.5623184", "0.5616796", "0.5608406", "0.5604298", "0.5602783", "0.5593414", "0.557875", "0.55744135", "0.5560468", "0.5558361", "0.5558251", "0.55547476", "0.55394185", "0.5537885", "0.5525262", "0.55142725", "0.551293", "0.5501559", "0.5500802", "0.54997534", "0.54926825", "0.5485951", "0.5476497", "0.54704076", "0.54695493", "0.54614145", "0.54564416", "0.5419022", "0.54095256", "0.54092544", "0.540407", "0.54026747", "0.5395024", "0.5394559", "0.5391387", "0.53899425", "0.5388287", "0.5381387", "0.5378073", "0.53710544", "0.5370407", "0.53623974", "0.53616375", "0.536086", "0.5358679", "0.53453386", "0.53342694", "0.53288805", "0.5325309", "0.5323026", "0.53217804", "0.53198063" ]
0.7055831
0
Test limit with some completed buildings.
def test_limit_with_influence(self): g = test_setup.simple_two_player() p1, p2 = g.players p1.influence = ['Stone'] p2.influence = ['Rubble'] self.assertEqual(g._clientele_limit(p1), 5) self.assertEqual(g._clientele_limit(p2), 3) p1.influence = ['Wood'] p2.influence = ['Marble'] self.assertEqual(g._clientele_limit(p1), 3) self.assertEqual(g._clientele_limit(p2), 5) p1.influence = ['Brick'] p2.influence = ['Concrete'] self.assertEqual(g._clientele_limit(p1), 4) self.assertEqual(g._clientele_limit(p2), 4) p1.influence = ['Brick', 'Concrete', 'Marble'] p2.influence = ['Concrete', 'Stone', 'Rubble', 'Rubble', 'Rubble'] self.assertEqual(g._clientele_limit(p1), 9) self.assertEqual(g._clientele_limit(p2), 10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_communities_created_limit(self):\n self.login_as(\"ben\")\n for i in range(settings.QUIZZZ_CREATED_COMMUNITIES_LIMIT):\n response = self.client.post(self.url, {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(self.url, {\"name\": \"pushthelimit\"})\n self.assert_validation_failed(response, \n data=[\"You have reached the limit for communities created.\"])", "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_set_project_limits(self):\n pass", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def test_launch_assignments_with_concurrent_unit_cap(self):\n cap_values = [1, 2, 3, 4, 5]\n for max_num_units in cap_values:\n mock_data_array = self.get_mock_assignment_data_array()\n launcher = TaskLauncher(\n self.db,\n self.task_run,\n mock_data_array,\n max_num_concurrent_units=max_num_units,\n )\n launcher.launched_units = LimitedDict(launcher.max_num_concurrent_units)\n launcher.create_assignments()\n launcher.launch_units(\"dummy-url:3000\")\n\n start_time = time.time()\n while set([u.get_status() for u in launcher.units]) != {AssignmentState.COMPLETED}:\n for unit in launcher.units:\n if unit.get_status() == AssignmentState.LAUNCHED:\n unit.set_db_status(AssignmentState.COMPLETED)\n time.sleep(0.1)\n self.assertEqual(launcher.launched_units.exceed_limit, False)\n curr_time = time.time()\n self.assertLessEqual(curr_time - start_time, MAX_WAIT_TIME_UNIT_LAUNCH)\n launcher.expire_units()\n self.tearDown()\n self.setUp()", "def test_communities_joined_limit(self):\n self.assertEqual(settings.QUIZZZ_JOINED_COMMUNITIES_LIMIT, 20)\n\n with self.settings(QUIZZZ_JOINED_COMMUNITIES_LIMIT=3):\n self.login_as(\"admin\")\n for i in range(settings.QUIZZZ_JOINED_COMMUNITIES_LIMIT):\n response = self.client.post(reverse('communities:create-community'), {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.login_as(\"ben\")\n for i in range(settings.QUIZZZ_JOINED_COMMUNITIES_LIMIT):\n response = self.client.post(self.url, {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, \n data=[\"You have reached the limit for communities joined.\"])", "def generateFinishedBuilds(builders=[], branches=[],\n num_builds=None, finished_before=None,\n max_search=200):", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def test_build_jobs(self, mock_cpu_count: unittest.mock.MagicMock) -> None:\n mock_cpu_count.return_value = 13\n cases = [\n # MAX_JOBS, USE_NINJA, IS_WINDOWS, want\n ((\"8\", True, False), [\"-j\", \"8\"]), # noqa: E201,E241\n ((None, True, False), None), # noqa: E201,E241\n ((\"7\", False, False), [\"-j\", \"7\"]), # noqa: E201,E241\n ((None, False, False), [\"-j\", 
\"13\"]), # noqa: E201,E241\n ((\"6\", True, True), [\"-j\", \"6\"]), # noqa: E201,E241\n ((None, True, True), None), # noqa: E201,E241\n ((\"11\", False, True), [\"/p:CL_MPCount=11\"]), # noqa: E201,E241\n ((None, False, True), [\"/p:CL_MPCount=13\"]), # noqa: E201,E241\n ]\n for (max_jobs, use_ninja, is_windows), want in cases:\n with self.subTest(\n MAX_JOBS=max_jobs, USE_NINJA=use_ninja, IS_WINDOWS=is_windows\n ):\n with contextlib.ExitStack() as stack:\n stack.enter_context(env_var(\"MAX_JOBS\", max_jobs))\n stack.enter_context(\n unittest.mock.patch.object(\n tools.setup_helpers.cmake, \"USE_NINJA\", use_ninja\n )\n )\n stack.enter_context(\n unittest.mock.patch.object(\n tools.setup_helpers.cmake, \"IS_WINDOWS\", is_windows\n )\n )\n\n cmake = tools.setup_helpers.cmake.CMake()\n\n with unittest.mock.patch.object(cmake, \"run\") as cmake_run:\n cmake.build({})\n\n cmake_run.assert_called_once()\n (call,) = cmake_run.mock_calls\n build_args, _ = call.args\n\n if want is None:\n self.assertNotIn(\"-j\", build_args)\n else:\n self.assert_contains_sequence(build_args, want)", "def _check_items_limit(self):\n if self.items_limit and self.items_limit == self.get_metadata('items_count'):\n raise ItemsLimitReached('Finishing job after items_limit reached:'\n ' {} items written.'.format(self.get_metadata('items_count')))", "async def get_builds(self, *, quantity=10):", "def test_max_N_too_small(self):\n\t\t\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams[MAX_N] = DEFAULT_MAX_EVALS+1\n\t\t\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\tfor ww_layer in iterator:\n\t\t\tif ww_layer.N > params[MAX_N]:\n\t\t\t\tself.assertTrue(ww_layer.skipped)\n\t\t\n\t\tdetails = self.watcher.describe(max_N=DEFAULT_MAX_EVALS+1)\n\t\tprint(details[['N','M']])\n\t\tself.assertEqual(10,len(details))\n\n\t\treturn", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_max_members(self):\n self.login_as(self.USER)\n\n group_members = Membership.objects.filter(community_id=self.GROUP_ID).count()\n Community.objects.filter(pk=self.GROUP_ID).update(max_members=group_members)\n \n with self.assertNumQueries(5):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"This group has reached its member limit.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "async def test_max_processes(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # 2 maximum tasks\n\n # 1 runs at 1 second\n # 2 runs at 2 seconds\n # 3 runs at 11 seconds\n # 4 runs at 12 seconds\n # 5 runs at 21 seconds\n # 6 runs at 22 seconds\n # 7 runs at 31 seconds\n # 8 runs at 32 seconds\n # Total: 6\n\n 
scheduler.max_running_tasks = 2 # set the maximum number of running tasks in parallel\n\n # Set interval schedule configuration\n interval_schedule = IntervalSchedule()\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.name = 'max active'\n interval_schedule.exclusive = False\n interval_schedule.process_name = 'sleep10'\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(30.3)\n scheduler.max_running_tasks = 0 # set the maximum number of running tasks in parallel\n\n tasks = await scheduler.get_tasks(10)\n assert len(tasks) == 6\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 2\n\n # They end...\n await asyncio.sleep(20)\n\n scheduler.max_running_tasks = 10\n\n await asyncio.sleep(11)\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 10\n\n await self.stop_scheduler(scheduler)", "def test_custom_per_project_upper_limit(self):\n data = {'payment_amount': '50.00'}\n account = Account(goal=8000, current=3001)\n form = DonationAmountForm(data=data, account=account)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertEqual('max_value', errors['payment_amount'][0].code)\n self.assertTrue('$49.99' in errors['payment_amount'][0].message)\n\n account.current = 3000\n form = DonationAmountForm(data=data, account=account)\n self.assertTrue(form.is_valid())", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def has_more_work(self):\n return self.done_counter < self.N", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def test_it(self):\n self.n += 1\n if self.n >= 5:\n self.fail(\"eventually failing\")", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def test_get_limit_6_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=6),\n gross_income.BASE_LIMIT + (gross_income.EXTRA_CHILD_MODIFIER*2)\n )", "def test_maximum_processing_queue_not_full(mock_store):\n # GIVEN a flow cell needs to be retrieved from PDC\n backup_api = BackupAPI(\n encryption_api=mock.Mock(),\n encrypt_dir=mock.Mock(),\n status=mock_store,\n tar_api=mock.Mock(),\n pdc_api=mock.Mock(),\n flow_cells_dir=mock.Mock(),\n )\n # WHEN there are no flow cells being retrieved from PDC\n mock_store.get_flow_cells_by_statuses().return_value = []\n\n # THEN this method should return True\n assert backup_api.check_processing() is True", "def generateFinishedBuilds(branches=[],\n num_builds=None,\n max_buildnum=None, finished_before=None,\n max_search=200,\n ):", "def test_group_exceed_max_testcases(self):\n for i in range(1, 31):\n testcase = test_utils.create_generic_testcase()\n testcase.crash_type = 'Heap-buffer-overflow'\n testcase.crash_state = 'abcdefgh' + str(i)\n testcase.project_name = 'project'\n testcase.one_time_crasher_flag = False\n\n # Attach actual issues to some testcases.\n if i in [3, 4, 5]:\n testcase.bug_information = '123'\n\n # Make some testcases unreproducible.\n if i in [1, 2, 3]:\n testcase.one_time_crasher_flag = True\n\n testcase.put()\n\n unrelated_testcase = 
test_utils.create_generic_testcase()\n\n grouper.group_testcases()\n\n testcase_ids = list(data_handler.get_open_testcase_id_iterator())\n\n # [1, 2] get removed since they are unreproducible testcases.\n # [3] is not removed since it has bug attached (even though unreproducible).\n # [6, 7, 8] are removed to account for max group size. Even though they\n # are reproducible, they are the ones with least weight.\n expected_testcase_ids = [3, 4, 5] + list(range(\n 9, 31)) + [unrelated_testcase.key.id()]\n self.assertEqual(expected_testcase_ids, testcase_ids)", "def test_get_limit_4_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=4),\n gross_income.BASE_LIMIT\n )", "def test_query_train_jobs_with_exceeded_limit(self, client):\n params = dict(offset=0, limit=1000)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'", "def test_valid_n_jobs(n_jobs: Any) -> None:\n check_n_jobs(n_jobs)", "def STAND_LIMIT() -> int:\n return 15", "def test_buildings_rows(self):\n processed_buildings_output = buildings_clean(\n \"seattlecollision/data/raw_data/raw_buildings_input.csv\")\n self.assertTrue(processed_buildings_output.shape[0] >= 10)", "def test_branches_limit(chikin):\n assert chikin.section.subsection.string == 'Chikin Fly'", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def test_limit_with_aqueduct(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 4)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 10)", "def testAtLeastSetsLimit(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g = 6\n\t\tc.atLeast(2)\n\t\tc.replay()\n\t\tx.g = 6\n\t\tself.failUnlessRaises(Exception, c.verify)\n\t\tx.g = 6\n\t\tc.verify()", "def sleep_until_next_revision_ready(self, revision_list):\n api = self.api\n\n revision_mapping = {}\n gs_jobs = []\n buildbot_jobs = []\n\n for revision in revision_list:\n url = revision.get_next_url()\n buildbot_job = revision.get_buildbot_locator()\n if url:\n gs_jobs.append({'type': 'gs', 'location': url})\n revision_mapping[url] = revision\n if buildbot_job:\n buildbot_jobs.append(buildbot_job)\n revision_mapping[buildbot_job['job_name']] = revision\n\n jobs_config = 
{'jobs': buildbot_jobs + gs_jobs}\n\n script = api.resource('wait_for_any.py')\n args_list = [api.m.gsutil.get_gsutil_path()] if gs_jobs else []\n\n try:\n step_name = 'Waiting for revision ' + revision_list[0].revision_string\n if len(revision_list) > 1:\n step_name += ' and %d other revision(s).' % (len(revision_list) - 1)\n api.m.python(\n str(step_name),\n script,\n args_list,\n stdout=api.m.json.output(),\n stdin=api.m.json.input(jobs_config),\n ok_ret={0, 1})\n except api.m.step.StepFailure as sf: # pragma: no cover\n if sf.retcode == 2: # 6 days and no builds finished.\n for revision in revision_list:\n revision.status = revision_state.RevisionState.FAILED\n for revision in revision_list:\n if revision.status == revision.TESTING:\n self.surface_result('TEST_TIMEOUT')\n if revision.status == revision.BUILDING:\n self.surface_result('BUILD_TIMEOUT')\n return None # All builds are failed, no point in returning one.\n else: # Something else went wrong.\n raise\n\n step_results = api.m.step.active_result.stdout\n build_failed = api.m.step.active_result.retcode\n\n if build_failed:\n # Explicitly make the step red.\n api.m.step.active_result.presentation.status = api.m.step.FAILURE\n\n if not step_results:\n # For most recipe_simulation_test cases.\n return None\n\n failed_jobs = step_results.get('failed', [])\n completed_jobs = step_results.get('completed', [])\n last_failed_revision = None\n assert failed_jobs or completed_jobs\n\n # Marked all failed builds as failed\n for job in failed_jobs:\n last_failed_revision = revision_mapping[str(job.get(\n 'location', job.get('job_name')))]\n if 'job_url' in job:\n url = job['job_url']\n api.m.step.active_result.presentation.links['Failed build'] = url\n last_failed_revision.status = revision_state.RevisionState.FAILED\n\n # Return a completed job if available.\n for job in completed_jobs:\n if 'job_url' in job: # pragma: no cover\n url = job['job_url']\n api.m.step.active_result.presentation.links['Completed build'] = url\n return revision_mapping[str(job.get(\n 'location', job.get('job_name')))]\n\n # Or return any of the failed revisions.\n return last_failed_revision", "def checkSubmissions(limit=submission_read_limit):\n submissions = subreddit.get_new(limit=limit)\n internal_count = 0\n\n print(\"\\n---\\n%s - Checking latest submissions...\" % (datetime.now()))\n for submission in submissions:\n if submission.id in already_processed:\n print(\"%s - Skipping previously processed: %s\" % (datetime.now(), submission.id))\n continue\n \n response = getResponse(submission.title, skipPartial=False)\n #print(\"\\t Read(%s): %s\" % (submission.id, submission))\n \n # If valid submission\n if response:\n internal_count += 1\n msg = response[3] # response = [quote_text, conversion_text, value, full_response_text]\n while True:\n try:\n print(\"\\n%s - Commenting on %s...\" % (datetime.now(), submission.id))\n print(\"\\n\\t%s\\n\\n\" % (submission))\n submission.add_comment(msg)\n already_processed.add(submission.id) # Remove from already_processed as we didn't get it\n print(\"> %s - Successful added comment to %s\" % (datetime.now(), submission.id))\n updateProcessed(submission, response)\n break\n except praw.errors.AlreadySubmitted as e:\n print(\"> %s - Already submitted skipping...\" % datetime.now())\n break\n except praw.errors.RateLimitExceeded as e:\n print(\"> %s - Rate Limit Error for replying to {}, sleeping for {} before retrying...\".format(datetime.now(), submission.id, e.sleep_time))\n sleep_time = e.sleep_time\n while sleep_time 
> 60:\n time.sleep(60) # sleep in increments of 1 minute\n sleep_time -= 60\n print(\"\\t%s - %s seconds to go...\" % (datetime.now(), sleep_time))\n time.sleep(sleep_time)\n\n # Number of comments sent\n return internal_count", "def test_generation_length(self):\n for i in range(1, 20, 3):\n test_obj = FakeOrderBuilder(n=i).build()\n self.assertIs(len(test_obj), i)", "def getPendingBuilds():", "def getPendingBuilds():", "def test_extract_max(self):\n for i in xrange(0,100):\n self.rebuild_all()\n real_max = self.real_heap.extract_max()\n #print \"real max is :\",real_max\n #print \"the copy max is :\",max(self.copy_heap)\n assert real_max == max(self.copy_heap)\n assert self.is_heap_valid(self.real_heap) == True\n \n if i%10==0:\n print \"Extraction of %d/%d is completed\"%(i,100)", "def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n self.assertEqual(self.es._total, total)", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def test_anonymous_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n for i in range(10):\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.\" + str(i),\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, len(t.task_runs)\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same IP\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_ip, t.task_runs), err_msg", "def test_max_seconds(self):\n rusher = Rusher(self._sleepy_worker, 2)\n # check that the first worker returns\n _, results = rusher.rush(0.5)\n self.assertEqual(sorted(results), [0])\n # check both workers return\n _, results = rusher.rush(1.5)\n self.assertEqual(sorted(results), [0, 1])", "def test_target_greater_than_alp(self):\n alp = list(range(5))\n targets = generate_targets(alp, 10)\n self.assertEqual(len(targets), 10)\n\n counts = Counter(targets)\n\n for item in alp:\n self.assertEqual(counts[item], 2)", "def launch_project_sizing():\n from queries import IN_PRODUCTION_NEED_SCAN, NEW_NEED_SCAN, OTHER_NEED_SCAN\n if not getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED\",False):\n logger.error(\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\")\n return \"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\"\n\n prioritise_old = getattr(settings,\"GNMPLUTOSTATS_PRIORITISE_OLD\",False)\n if prioritise_old:\n logger.warning(\"GNMPLUTOSTATS_PRIORITISE_OLD is set, will only focus on old projects\")\n\n trigger_limit = int(getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_LIMIT\",10))\n 
to_trigger = []\n c=0\n\n logger.info(\"Gathering projects to measure\")\n\n if not prioritise_old:\n highest_priority = IN_PRODUCTION_NEED_SCAN.order_by('last_scan')\n for entry in highest_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if not prioritise_old and len(to_trigger)<trigger_limit:\n next_priority = NEW_NEED_SCAN.order_by('last_scan')\n for entry in next_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if len(to_trigger)<trigger_limit:\n everything_else = OTHER_NEED_SCAN.order_by('last_scan')\n for entry in everything_else:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n logger.info(\"Projects to scan: \".format(to_trigger))\n if len(to_trigger)==0:\n if prioritise_old:\n logger.error(\"No projects to scan and GNMPLUTOSTATS_PRIORITISE_OLD is set. You should disable this now to pick up new projects\")\n logger.info(\"No projects need to be scanned right now\")\n\n n=0\n for entry in to_trigger:\n n+=1\n calculate_project_size.apply_async(kwargs={'project_id': entry.project_id},queue=getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_QUEUE\",\"celery\"))\n return \"Triggered {0} projects to scan\".format(n)", "def test_limit_with_insula_and_aqueduct(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))\n p1.buildings.append(Building(d.insula, 'Rubble', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 8)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 14)", "def test_maximum_processing_queue_full(mock_store):\n # GIVEN a flow cell needs to be retrieved from PDC\n backup_api = BackupAPI(\n encryption_api=mock.Mock(),\n encrypt_dir=mock.Mock(),\n status=mock_store,\n tar_api=mock.Mock(),\n pdc_api=mock.Mock(),\n flow_cells_dir=mock.Mock(),\n )\n\n # WHEN there's already a flow cell being retrieved from PDC\n mock_store.get_flow_cells_by_statuses.return_value = [[mock.Mock()]]\n\n # THEN this method should return False\n assert backup_api.check_processing() is False", "def test_get_company_families_in_need_of_plan_b_negative(self):\n self.mock.max_simultaneous_plan_bs = 5\n self.mock.currently_running_companies = [[1, \"run_id1\"], [2, \"run_id2\"], [3, \"run_id3\"], [4, \"run_id4\"], [5, \"run_id5\"]]\n self.mock.company_families_in_need_of_plan_b = None\n\n self.mox.ReplayAll()\n\n # run, baby!\n CompanyAnalyticsPlanBRunner._get_company_families_in_need_of_plan_b(self.mock)\n\n self.assertIsNone(self.mock.company_families_in_need_of_plan_b)\n self.assertEqual(self.mock.max_simultaneous_plan_bs_running, True)", "def test_stat_page_passes_total_jams_available_in_context(self):\n total_jams = 10\n available_jams = 5\n\n p = PlayerFactory()\n bout = BoutFactory() \n bout.home_roster.players.add(p)\n\n for i in range(0, total_jams):\n if(i < available_jams):\n player = p\n else:\n player = None\n j = JamFactory(bout=bout)\n PlayerToJamFactory(player=player, jam=j)\n\n c = Client()\n response = c.get(reverse(self.test_url))\n self.assertEqual(total_jams, \n response.context['total_jams_available'])", "def test_max_features_wo_gridsearch(self):\n X,Y,Z = self.create_bin_data()\n t = self.check_task('RFC 
nt=1;e=1;c=gini;mf=0.0001', X, Y, Z)\n self.assertEquals(t.parameters['max_features'], 1)", "def test_num_circs_shots(self):\n backend = FakeValencia()\n generator = Generator(backend)\n max_experiments = 5\n max_shots = 10\n backend._configuration.max_experiments = max_experiments\n backend._configuration.max_shots = max_shots\n sub_tests = [1, 3*max_shots, 3*max_shots+1, 3*max_shots-1,\n 3*max_shots*2, 3*max_shots*2+1, 3*max_shots*max_experiments-1]\n for num_raw_bits in sub_tests:\n with self.subTest(num_raw_bits=num_raw_bits):\n result = generator.sample(num_raw_bits=num_raw_bits).block_until_ready()\n self.assertGreaterEqual(len(result.raw_bits), num_raw_bits)", "def test_max_iterations(self, monkeypatch):\r\n\r\n graph = nx.wheel_graph(5)\r\n c = [0, 1]\r\n\r\n with monkeypatch.context() as m:\r\n p = functools.partial(patch_random_choice, element=0)\r\n m.setattr(np.random, \"choice\", p)\r\n result = clique.search(c, graph, iterations=5)\r\n\r\n assert result == [0, 2, 3]", "def test_update_instance_limit(self):\n pass", "def print_big_number_announcement(steps):\n if steps > 5000:\n print(\"It will take a while...\")", "def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None", "def test_limit_with_insula(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.insula, 'Rubble', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 4)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 7)", "def test_target_number_less_than_alp(self):\n alp = list(range(10))\n targets = generate_targets(alp, 5)\n self.assertEqual(len(targets), 5)\n self.assertEqual(len(targets), len(set(targets)))", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert 
t.state == \"completed\", t.state", "def test_sad_purchasePlaces_12_places_max__step_by_step(self):\n\n club_index = self.add_fake_club(points=100)\n\n print(\"INIT:\", self.competitions, self.clubs)\n\n points = int(self.clubs[club_index][\"points\"])\n slots = int(self.competitions[0][\"numberOfPlaces\"])\n booked = 0\n\n num_actions = 12 + 1\n\n for i in range(1, num_actions + 1):\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n booked += 1\n print(i, \"\\n\", rv.data, rv.status_code, \"\\n\", server.booking)\n\n if i < num_actions - 1:\n cost = points - (self.cost_per_place * booked)\n assert rv.status_code in [200]\n assert str.encode(f\"Number of Places: {slots-booked}\") in rv.data\n assert str.encode(f\"Points available: {cost}\") in rv.data\n\n assert rv.status_code in [400]\n assert b\"You can&#39;t book more than 12 places per competition\" in rv.data", "def jobHealthy(self, count):\n job = self.tester.submission_result.job\n for idx in range(count - 1):\n if (job.health == 'healthy'):\n return True\n print(\"health check fail : %d\" % idx )\n time.sleep(1)\n job.refresh()\n self.assertEqual('healthy', job.health)\n return False", "def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_default_limit(self):\n telem = self.create_logs(self.user1, num=200)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(100, len(data))", "def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. 
If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total", "def testMaxTargets(self):\n\n self.assertEqual('Maxtargets: %s' % inventory_base.DEFAULT_MAXTARGETS,\n self.inv._CmdMaxTargets('maxtargets', []))\n self.inv._CmdMaxTargets('maxtargets', ['10'])\n self.assertEqual(10, self.inv._maxtargets)", "def getBuild(number):", "def getBuild(number):", "async def depots_required(\n bot: sc2.BotAI,\n ) -> int:\n growth_speed = 0\n townhall_count = bot.structures(\n {UnitTypeId.COMMANDCENTER, UnitTypeId.PLANETARYFORTRESS, UnitTypeId.ORBITALCOMMAND}\n ).ready.amount\n\n rax_count = bot.structures(UnitTypeId.BARRACKS).ready.amount\n rax_count += bot.structures(UnitTypeId.BARRACKSREACTOR).ready.amount\n\n factory_count = bot.structures(UnitTypeId.FACTORY).ready.amount\n factory_count += bot.structures(UnitTypeId.FACTORYREACTOR).ready.amount\n starport_count = bot.structures(UnitTypeId.STARPORT).ready.amount\n starport_count += bot.structures(UnitTypeId.STARPORTREACTOR).ready.amount\n\n # Probes/scv take 12 seconds to build\n # https://liquipedia.net/starcraft2/Nexus_(Legacy_of_the_Void)\n growth_speed += townhall_count / 12.0\n\n # https://liquipedia.net/starcraft2/Barracks_(Legacy_of_the_Void)\n # fastest usage is marauder supply with 2 supply and train 21 seconds\n growth_speed += rax_count * 2 / 21.0\n\n # https://liquipedia.net/starcraft2/Factory_(Legacy_of_the_Void)\n # fastest usage is helliom with 2 supply and build time of 21 seconds\n growth_speed += factory_count * 2 / 21.0\n\n # https://liquipedia.net/starcraft2/Starport_(Legacy_of_the_Void)\n # We'll use viking timing here\n growth_speed += starport_count * 2 / 30.0\n\n growth_speed *= 1.2 # Just a little bit of margin of error\n build_time = 21 # depot build time\n # build_time += min(self.ai.time / 60, 5) # probe walk time\n\n predicted_supply = min(200, bot.supply_used + build_time * growth_speed)\n current_depots = bot.structures(\n {UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED, UnitTypeId.SUPPLYDEPOTDROP}\n ).ready.amount\n\n if bot.supply_cap == 200:\n return current_depots\n\n return ceil((predicted_supply - bot.supply_cap) / 8) + current_depots", "def test_request_limit_inner_larger(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2100, 'limit': 100})", "def limit_reached(self):\n if len(self.selected) >= self.limit:\n return True\n return False", "def testoptdone(self):\r\n assert self.data.optdone\r\n convergence = numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets\r\n assert sum(convergence) >= 2", "def test_unlimited_run_slow(self):\n cmds = ['/bin/sleep 0',\n '/bin/sleep 1',\n '/bin/sleep 2',]\n\n q = QueueCommands(cmds, 3)\n start = time.time()\n q.run()\n end = time.time()-start\n # we should only take the length of the longest sleep\n self.assertTrue( end > 1.8 and end < 2.2,\n \"took %s seconds, exected ~2\" % (end,))", "def is_limited(self) -> bool:\n return self.__times > ActionState.UNLIMITED", "def test_get_limit_5_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=5),\n gross_income.BASE_LIMIT + gross_income.EXTRA_CHILD_MODIFIER\n )", "def ge(value, limit):\n return value >= limit", "def can_fit_more(self):\n\n return len(self._requeue_jobs) < MAX_NUM", "def test_check_large_straight_false(self):\n not_large_straight_fixtures = 
[[1, 2, 3, 4, 6],\n [1, 3, 4, 5, 6],\n ]\n\n for fixture in not_large_straight_fixtures:\n score = self.roll.check_large_straight(fixture)\n\n self.assertNotEqual(score, 35)\n self.assertEqual(score, 0)\n self.assertEqual(len(fixture), 5)", "def test_all_cuts(self):\n try:\n graph = np.array([[0., 1., 2., 0.],\n [1., 0., 1., 0.],\n [2., 1., 0., 1.],\n [0., 0., 1., 0.]])\n\n optimizer = GoemansWilliamsonOptimizer(num_cuts=10, seed=0)\n\n problem = Maxcut(graph).to_quadratic_program()\n self.assertIsNotNone(problem)\n\n results = optimizer.solve(problem)\n self.assertIsNotNone(results)\n self.assertIsInstance(results, GoemansWilliamsonOptimizationResult)\n\n self.assertIsNotNone(results.x)\n np.testing.assert_almost_equal([0, 1, 1, 0], results.x, 3)\n\n self.assertIsNotNone(results.fval)\n np.testing.assert_almost_equal(4, results.fval, 3)\n\n self.assertIsNotNone(results.samples)\n self.assertEqual(3, len(results.samples))\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))", "def check(self):\n self.__check_request_limit()", "def test_cell_list_limit_positive_num_success(self, mock_list):\n self.shell('cell-list -r 1 --limit 1')\n mock_list.assert_called_once_with(limit=1)", "def test_limited_run_slow(self):\n cmds = ['/bin/sleep 1',\n '/bin/sleep 2',\n '/bin/sleep 3',]\n\n q = QueueCommands(cmds, 2)\n\n start = time.time()\n q.run()\n end = time.time()-start\n self.assertTrue( end > 3.7 and end < 4.3,\n \"took %s seconds, expected ~4\" % (end,))", "def test_initial_limit(self):\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n self.assertEqual(g._clientele_limit(p2), 2)", "def _test_max_simulation_step(self):\n previous_step = self.program.steps[0]\n previous_pb_frame = self.program.steps[0].playback_frames[0]\n for step in self.program.steps:\n for index, pb_frame in enumerate(step.playback_frames):\n if self.program.simulation_type == InstructionListJointsFlags.TimeBased:\n msg = f\"Step {step.name} playback frame {index}, time_step {pb_frame.time_step} not in 'max_time_step' bounds\"\n self.assertLessEqual(pb_frame.time_step, self.program.max_time_step, msg)\n else:\n move_type = step.move_type if index != 0 else previous_step.move_type\n if move_type == MoveType.Joint:\n msg_deg = f\"Step {step.name} (Joint) playback frame {index}, deg_step {pb_frame.deg_step} not in 'max_deg_step' bounds\"\n\n # Check if value given in list result is smaller than max for simulation\n self.assertLessEqual(pb_frame.deg_step, self.program.max_deg_step, msg_deg)\n\n # Check if actual step is smaller than max for simulation\n actual_deg_step = max([abs(j_a[0] - j_b[0]) for j_a, j_b\n in zip(pb_frame.joints.rows, previous_pb_frame.joints.rows)])\n self.assertLessEqual(actual_deg_step, self.program.max_deg_step, msg_deg)\n else:\n msg_mm = f\"Step {step.name} (Frame )playback frame {index}, mm_step {pb_frame.mm_step} not in 'max_mm_step' bounds\"\n\n # Check if value given in list result is smaller than max for simulation\n self.assertLessEqual(pb_frame.mm_step, self.program.max_mm_step, msg_mm)\n\n # Check if actual step is smaller than max for simulation\n actual_mm_step = sqrt(sum([(c_a[0] - c_b[0]) * (c_a[0] - c_b[0]) for c_a, c_b\n in zip(pb_frame.coords.rows, previous_pb_frame.coords.rows)]))\n self.assertLessEqual(actual_mm_step, self.program.max_mm_step, msg_mm)\n\n previous_pb_frame = pb_frame\n previous_step = step", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n 
self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def testoptdone(self):\r\n assert self.data.optdone\r\n assert numpy.all(numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets)", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def test_listing_from_wall(self):", "def test_n_results_greater_than_500(self):\n\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_n_results', new_callable=PropertyMock(return_value=1070)):\n with patch.object(thing, '_SearchTweets__twitter_all_tweets',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__multi_user', new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(thing, '_SearchTweets__save'):\n\n thing.search()\n\n self.assertEqual(thing.total_result, 1070)", "def test_quick_build1(self):\n pass", "def test_check_large_straight_true(self):\n large_straight_fixtures = [[1, 2, 3, 4, 5],\n [2, 3, 4, 5, 6],\n ]\n\n for fixture in large_straight_fixtures:\n score = self.roll.check_large_straight(fixture)\n\n self.assertEqual(score, 35)\n self.assertEqual(len(fixture), 5)", "def has_more_trials(self) -> bool:\r\n raise NotImplementedError", "def calculate(self, limit):\r\n pass", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)" ]
[ "0.6331062", "0.6307083", "0.6275143", "0.62501746", "0.62440616", "0.62208784", "0.6138763", "0.613732", "0.6108998", "0.61038834", "0.60081613", "0.58726895", "0.58467835", "0.58440167", "0.58301693", "0.5813597", "0.5789836", "0.5772815", "0.5768534", "0.5768062", "0.57310224", "0.5729513", "0.5722892", "0.57207364", "0.5720562", "0.5711001", "0.5691037", "0.5690936", "0.5690555", "0.5680691", "0.5670197", "0.5638327", "0.5631145", "0.56164646", "0.55966717", "0.5596263", "0.55956256", "0.559484", "0.5591941", "0.5591072", "0.5591052", "0.5590616", "0.5590616", "0.5590006", "0.5585539", "0.5557048", "0.55479383", "0.5543028", "0.55303055", "0.55253065", "0.5520842", "0.5519562", "0.55156004", "0.5510211", "0.55079025", "0.55041504", "0.5499578", "0.54933333", "0.54890037", "0.54885584", "0.5487803", "0.5486769", "0.54849994", "0.5473202", "0.54664296", "0.5461192", "0.54592764", "0.5453995", "0.5443142", "0.5441775", "0.5436194", "0.543405", "0.543405", "0.54275376", "0.5425841", "0.5416565", "0.5411203", "0.5411079", "0.5396361", "0.5387066", "0.53781074", "0.53743285", "0.5373865", "0.5373659", "0.5373386", "0.5363978", "0.5361473", "0.5353938", "0.5348264", "0.53459", "0.5344916", "0.53427124", "0.53327036", "0.5323202", "0.53224117", "0.53157234", "0.5313147", "0.53127706", "0.53079236", "0.5306103", "0.5302329" ]
0.0
-1
Test limit with completed Insula.
def test_limit_with_insula(self): d = TestDeck() g = test_setup.simple_two_player() p1, p2 = g.players self.assertEqual(g._clientele_limit(p1), 2) p1.buildings.append(Building(d.insula, 'Rubble', complete=True)) self.assertEqual(g._clientele_limit(p1), 4) p1.influence = ['Stone'] self.assertEqual(g._clientele_limit(p1), 7)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def calculate(self, limit):\r\n pass", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def test_query_train_jobs_with_exceeded_limit(self, client):\n params = dict(offset=0, limit=1000)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == 
'50540002'", "def test_limit_is_negative(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=-5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])", "def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')", "def test_default_limit(self):\n telem = self.create_logs(self.user1, num=200)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(100, len(data))", "def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n self.assertEqual(self.es._total, total)", "def calculate(self, limit):\n pass", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def test_update_instance_limit(self):\n pass", "def limit(self, limit):\n self._evaluated = False\n self._limit = limit\n return self", "def test_it(self):\n self.n += 1\n if self.n >= 5:\n self.fail(\"eventually failing\")", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def test_initial_limit(self):\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n self.assertEqual(g._clientele_limit(p2), 2)", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_limit_cpu(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n self.assertEqual(json.loads(response.data['cpu']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['cpu'])\n # set an initial limit\n body = {'cpu': json.dumps({'web': '1024'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # set an additional value\n body = {'cpu': json.dumps({'worker': '512'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = 
response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_request_limit_inner_larger(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2100, 'limit': 100})", "def test_calculate_number_page_number_products_max_different(self):\n self.requestapi = RequestApi('snack', number_products_max=1000)\n self.requestapi.number_products = 1000\n result = self.requestapi.calculate_number_page()\n self.assertEqual(result, 10)", "def _check_items_limit(self):\n if self.items_limit and self.items_limit == self.get_metadata('items_count'):\n raise ItemsLimitReached('Finishing job after items_limit reached:'\n ' {} items written.'.format(self.get_metadata('items_count')))", "def __init__(self, reason, lim=0):\n self.successes = 0\n self.tests = 0\n self.reason = reason\n self.limit = lim", "def test_fail_on_rate_limit_exceeded(self):\n\n # setup 'short' limit for testing\n self.client.protocol.rate_limiter.rules = []\n self.client.protocol.rate_limiter.rules.append(\n XRateLimitRule(\n {\n \"short\": {\n \"usage\": 0,\n \"limit\": 600,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n \"long\": {\n \"usage\": 0,\n \"limit\": 30000,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n }\n )\n )\n\n # interact with api to get the limits\n self.client.get_athlete()\n\n # access the default rate limit rule\n rate_limit_rule = self.client.protocol.rate_limiter.rules[0]\n\n # get any of the rate limits, ex the 'short'\n limit = rate_limit_rule.rate_limits[\"short\"]\n\n # get current usage\n usage = limit[\"usage\"]\n print(\"last rate limit usage is {0}\".format(usage))\n\n # for testing purpses set the limit to usage\n limit[\"limit\"] = usage\n print(\"changing limit to {0}\".format(limit[\"limit\"]))\n\n # expect exception because of RateLimit has been\n # exceeded (or reached max)\n with self.assertRaises(exc.RateLimitExceeded):\n self.client.get_athlete()\n\n # request fired to early (less than 5 sec) causes timeout exception\n with self.assertRaises(exc.RateLimitTimeout):\n self.client.get_athlete()\n\n # once rate limit has exceeded wait until another request is possible\n # check if timeout has been set\n self.assertTrue(rate_limit_rule.limit_timeout > 0)\n print(\"limit timeout 
{0}\".format(rate_limit_rule.limit_timeout))\n\n # resetting limit\n # simulates Strava api - it would set the usage again to 0\n limit[\"limit\"] = 600\n print(\"resetting limit to {0}\".format(limit[\"limit\"]))\n\n try:\n # waiting until timeout expires\n time.sleep(5)\n\n # this time it should work again\n self.client.get_athlete()\n self.assertTrue(\"No exception raised\")\n except exc.RateLimitExceeded as e:\n self.fail(\"limiter raised RateLimitTimeout unexpectedly!\")\n\n # continue other tests with DefaultRateLimiter\n print(\"setting default rate limiter\")\n self.client.protocol.rate_limiter = DefaultRateLimiter()", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_request_limit_overflow(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(200, 20)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 4100, 'limit': 20})", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None", "def STAND_LIMIT() -> int:\n return 15", "def test_max_N_too_small(self):\n\t\t\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams[MAX_N] = DEFAULT_MAX_EVALS+1\n\t\t\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\tfor ww_layer in iterator:\n\t\t\tif ww_layer.N > params[MAX_N]:\n\t\t\t\tself.assertTrue(ww_layer.skipped)\n\t\t\n\t\tdetails = self.watcher.describe(max_N=DEFAULT_MAX_EVALS+1)\n\t\tprint(details[['N','M']])\n\t\tself.assertEqual(10,len(details))\n\n\t\treturn", "def send_it(self, func, limit, *args, **kwargs):\n counter = 0\n if counter > limit:\n return False\n counter += 1\n try:\n result = func(*args, **kwargs)\n time.sleep(1.1)\n return result\n except gspread.exceptions.APIError as e:\n if (e.response.json())['error']['code'] == 429:\n time.sleep(501)\n self.send_it(func, limit, *args, **kwargs)\n else:\n print(e)\n return False\n except Exception as e:\n print(e)\n return False", "def test_set_project_limits(self):\n pass", "def test_sleep_request(self):\n date = datetime.now() - timedelta(minutes=14)\n RequestAPI.objects.create(total_request=450, date=date)\n start = time.time()\n ManagerRequestApiTwitter().handle_rate_limit()\n stop = time.time()\n total_time = stop - start\n self.assertGreater(total_time, 60)", "def test_limit_with_insula_and_aqueduct(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))\n p1.buildings.append(Building(d.insula, 'Rubble', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 8)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 14)", "def test_request_limit_inner_smaller(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(10, 20)\n 
self._assert_params_equals(httpretty.last_request().path, {'offset': 300, 'limit': 20})", "def get_max_iters():\n return 2000", "def num_trials(self):", "def test_communities_created_limit(self):\n self.login_as(\"ben\")\n for i in range(settings.QUIZZZ_CREATED_COMMUNITIES_LIMIT):\n response = self.client.post(self.url, {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(self.url, {\"name\": \"pushthelimit\"})\n self.assert_validation_failed(response, \n data=[\"You have reached the limit for communities created.\"])", "def test_pagination(self):\n self.check_pagination()", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def set_result_limit(self, data):\n self.add_payload('resultLimit', data)\n self._result_limit = self._uni(data)", "def test(self):\n return test_throttle_method()", "def calculate(self, limit: int) -> None:\n raise NotImplementedError()", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def test_anonymous_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n for i in range(10):\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert 
data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.\" + str(i),\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, len(t.task_runs)\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same IP\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_ip, t.task_runs), err_msg", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_30(self):\n assert 'False' == Api.requestBlock('test-30')", "def time_limit(self):\n return 2503", "def test_25(self):\n assert 'False' == Api.requestBlock('test-25')", "def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit", "def limit(self, limit):\n\n # Return between 1 and 250 results, defaults to 10\n return max(1, min(250, int(limit) if limit else 10))", "def test_max_members(self):\n self.login_as(self.USER)\n\n group_members = Membership.objects.filter(community_id=self.GROUP_ID).count()\n Community.objects.filter(pk=self.GROUP_ID).update(max_members=group_members)\n \n with self.assertNumQueries(5):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"This group has reached its member limit.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_channel_messages_unlimited_pagination():\n clear()\n userOne = auth_register('firstuser@gmail.com', '123abc!@#', 'First', 'User') \n randChannel = channels_create(userOne['token'], 'randChannel', True)\n for _ in range(149):\n message_send(userOne['token'], randChannel['channel_id'], 'Hello')\n messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)\n assert(messages['start'] == 0)\n assert(messages['end'] == 50) \n messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)\n assert(messages2['start'] == 50)\n assert(messages2['end'] == 100) \n messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)\n assert(messages3['start'] == 100)\n assert(messages3['end'] == -1) \n assert(len(messages3['messages']) == 49)\n # an error should be raised when start is beyond 149 messages\n with pytest.raises(InputError): \n channel_messages(userOne['token'], randChannel['channel_id'], 150)", "def test_polling_custom_interval():\n with pytest.raises(polling2.MaxCallException):\n polling2.poll(\n target=lambda: requests.get(\"http://google.com\").status_code == 400,\n step_function=_custom_step, # adds 0.5 seconds to each iteration\n # step_function=polling.step_constant, # returns step\n # step_function=polling.step_linear_double, # returns step * 2\n step=0.5,\n max_tries=3,\n )", "def test_update_instance_limit1(self):\n pass", "def adb_video_limit(given_limit):\n return given_limit", "def test_huge_answers(self):\n self.init_player(\n '0', 'Welcome 
to Oppia!', 'do you know where the name \\'Oppia\\'')\n self.submit_and_compare(\n '0', '', 'In fact, the word Oppia means \\'learn\\'.')\n # This could potentially cause errors in stats_models when the answer\n # is persisted to the backend.\n self.submit_and_compare(\n 'a' * 1000500, 'Sorry, nope, we didn\\'t get it', '')", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def check(self):\n self.__check_request_limit()", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def test_limit_memory(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n self.assertEqual(json.loads(response.data['memory']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['memory'])\n # set an initial limit\n mem = {'web': '1G'}\n body = {'memory': json.dumps(mem)}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n memory = json.loads(response.data['memory'])\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # set an additional value\n body = {'memory': json.dumps({'worker': '512M'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n 
self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def _uppLim(self):\n if self.getResult(param='TS value')[0] >= self.tsmin:\n print(\"\\t=== TS value {} is above TSmin {}, no need to compute an upperlimit ===\"\n .format(self.getResult(param='TS value')[0], self.tsmin))\n return\n\n from UpperLimits import UpperLimits\n import UnbinnedAnalysis as UA\n \n like = UA.unbinnedAnalysis(evfile=self.outmktime, scfile=self.ft2, expmap=self.outexpmap,\n expcube=self.outltcube, irfs=self.irf, optimizer=\"NewMinuit\", srcmdl=self.model)\n like.fit(0)\n ul = UpperLimits(like)\n\n try:\n upp, norm=ul['TARGET'].bayesianUL(emin=self.emin, emax=self.emax, cl=0.95) \n except:\n upp = -1\n wf = open(self.outgtlike, 'a')\n wf.write(\"\\nUpper limit on source 'TARGET': {} ph/cm2/s.\".format(upp))\n wf.close()\n return", "def test_limits_query(self):\r\n owner = UserFactory.create()\r\n for i in range(30):\r\n app = AppFactory.create(owner=owner)\r\n task = TaskFactory(app=app)\r\n taskrun = TaskRunFactory(task=task)\r\n\r\n res = self.app.get('/api/app')\r\n data = json.loads(res.data)\r\n assert len(data) == 20, len(data)\r\n\r\n res = self.app.get('/api/app?limit=10')\r\n data = json.loads(res.data)\r\n assert len(data) == 10, len(data)\r\n\r\n res = self.app.get('/api/app?limit=10&offset=10')\r\n data = json.loads(res.data)\r\n assert len(data) == 10, len(data)\r\n assert data[0].get('name') == 'My App number 11', data[0]\r\n\r\n res = self.app.get('/api/task')\r\n data = json.loads(res.data)\r\n assert len(data) == 20, len(data)\r\n\r\n res = self.app.get('/api/taskrun')\r\n data = json.loads(res.data)\r\n assert len(data) == 20, len(data)\r\n\r\n UserFactory.create_batch(30)\r\n\r\n res = self.app.get('/api/user')\r\n data = json.loads(res.data)\r\n assert len(data) == 20, len(data)\r\n\r\n res = self.app.get('/api/user?limit=10')\r\n data = json.loads(res.data)\r\n print data\r\n assert len(data) == 10, len(data)\r\n\r\n res = self.app.get('/api/user?limit=10&offset=10')\r\n data = json.loads(res.data)\r\n assert len(data) == 10, len(data)\r\n assert data[0].get('name') == 'user11', data", "def limit(self, limit):\n self._limit = limit", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n _(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "def limit():\n bwc = BandwidthConfigurator()\n bwc.limit()", "def test_next_token(self) -> None:\n\n # `next_token` does not appear\n # Number of results is the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=20\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does not appear\n # Number of max results is larger than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=21\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n 
self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does appear\n # Number of max results is smaller than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 19)\n self.assertEqual(channel.json_body[\"next_token\"], 19)\n\n # Check\n # Set `from` to value of `next_token` for request remaining entries\n # `next_token` does not appear\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 1)\n self.assertNotIn(\"next_token\", channel.json_body)", "def test_get_hyperflex_feature_limit_internal_by_moid(self):\n pass", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state", "def test_store_elements_count_exceed_default_limit(self, mocker):\n proxy = mocker.patch('saana_lib.ranking.RankingToDatabase.proxy')\n _compute = mocker.patch('saana_lib.ranking.Ranking.compute')\n\n _compute.return_value = dict((i, list(range(5))) for i in range(10))\n self.klass.store()\n assert proxy.call_count == 
20", "def test_search_result_limit(self):\n results = self.searcher.search(\"crossfit\", 1)\n expected_results = 6\n\n self.assertEqual(results[0].indexable.docid, expected_results)", "def test_limit(db_session):\n query_params = {\"limit\": \"1\"}\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n offset_limit_info = parser.parse_offset_limit(page_max_size=30)\n offset = offset_limit_info.offset\n limit = offset_limit_info.limit\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts(),\n limit=limit,\n offset=offset\n )\n assert len(result) == 1", "def test_max_begin(self):\n self.assertEqual(max_integer([5, 3, 4, 1]), 5)", "def test_request_limit_no_outer(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': None, 'limit': None})\n s = r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2000, 'limit': 200})", "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum", "def test_get_limit_4_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=4),\n gross_income.BASE_LIMIT\n )", "def test_i(name, expectation, limit_name, limit_value, result):\n kwargs = {'batch_size': 2, limit_name: limit_value, 'lazy': True}\n\n pipeline = (Dataset(10).pipeline()\n .init_variable('var', -1)\n .update_variable('var', I(name), mode='w')\n .run(**kwargs)\n )\n\n with expectation:\n _ = pipeline.next_batch()\n\n assert pipeline.get_variable('var') == result", "def test_max_end(self):\n self.assertEqual(max_integer([5, 3, 4, 8]), 8)", "def le(value, limit):\n return value <= limit", "def test_10(self):\n assert 'False' == Api.requestBlock('test-10')", "def test_pickle_limit_continue(self):\n l = []\n for i in range(0, 30):\n l.append(i)\n self.plugin.save_data(l, 10)\n l = self.plugin.load_data()\n self.assertEqual(20, l[0])", "def test_recv_nolimit(self):\n self.driver.send_nolimit(self.msg_long)\n msg_flag, msg_recv = self.instance.recv_nolimit(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_long)", "def test_own_count(self):\n self._test_count_func(it_count)", "def test_inrange():\n assert cs.any > 0\n assert cs.any < cmax", "def test_38(self):\n assert 'True' == Api.requestBlock('test-38')", "def test_05_vmcp(self):\r\n url = '/api/vmcp'\r\n self.check_limit(url, 'get', 'app')", "def test_40(self):\n assert 'False' == Api.requestBlock('test-40')" ]
[ "0.70281196", "0.6930952", "0.6867976", "0.63591504", "0.6339935", "0.63310546", "0.63143235", "0.63078123", "0.62953484", "0.62816274", "0.62538594", "0.62326956", "0.61941534", "0.61918557", "0.6187162", "0.61759824", "0.61723286", "0.61669195", "0.6164165", "0.6162524", "0.61611265", "0.6146944", "0.6142716", "0.6133879", "0.6119211", "0.60341835", "0.60208964", "0.5968592", "0.5953195", "0.5946754", "0.59318113", "0.59171796", "0.59157795", "0.5915592", "0.5875401", "0.5865881", "0.58391076", "0.583474", "0.58139855", "0.5813863", "0.5802101", "0.57806057", "0.5769262", "0.57658035", "0.5744687", "0.5740885", "0.5734131", "0.57240736", "0.5722148", "0.5715519", "0.57086694", "0.5707055", "0.5701681", "0.56732297", "0.5670488", "0.5664628", "0.56638956", "0.566376", "0.5659666", "0.5646996", "0.5645508", "0.5643861", "0.5639961", "0.56363785", "0.56305474", "0.56289625", "0.5628635", "0.5626513", "0.5624063", "0.56215096", "0.5620804", "0.561458", "0.56115365", "0.55967593", "0.55938995", "0.5586367", "0.55802053", "0.55789655", "0.557762", "0.55722386", "0.55664337", "0.55657303", "0.5561513", "0.55597335", "0.55433506", "0.55289924", "0.55265385", "0.5524921", "0.5523284", "0.55166", "0.5513797", "0.55137587", "0.55052894", "0.5502799", "0.54971075", "0.54959947", "0.5481013", "0.54793483", "0.5478245", "0.54778314" ]
0.62189835
12
Test limit with completed Aqueduct.
def test_limit_with_aqueduct(self):
    d = TestDeck()

    g = test_setup.simple_two_player()

    p1, p2 = g.players

    self.assertEqual(g._clientele_limit(p1), 2)

    p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))

    self.assertEqual(g._clientele_limit(p1), 4)

    p1.influence = ['Stone']

    self.assertEqual(g._clientele_limit(p1), 10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def test_limit_with_insula_and_aqueduct(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))\n p1.buildings.append(Building(d.insula, 'Rubble', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 8)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 14)", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def calculate(self, limit):\r\n pass", "def test_update_instance_limit(self):\n pass", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_set_project_limits(self):\n pass", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def calculate(self, limit):\n pass", "def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def test_initial_limit(self):\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n self.assertEqual(g._clientele_limit(p2), 2)", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = 
content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def __init__(self, reason, lim=0):\n self.successes = 0\n self.tests = 0\n self.reason = reason\n self.limit = lim", "def test_launch_assignments_with_concurrent_unit_cap(self):\n cap_values = [1, 2, 3, 4, 5]\n for max_num_units in cap_values:\n mock_data_array = self.get_mock_assignment_data_array()\n launcher = TaskLauncher(\n self.db,\n self.task_run,\n mock_data_array,\n max_num_concurrent_units=max_num_units,\n )\n launcher.launched_units = LimitedDict(launcher.max_num_concurrent_units)\n launcher.create_assignments()\n launcher.launch_units(\"dummy-url:3000\")\n\n start_time = time.time()\n while set([u.get_status() for u in launcher.units]) != {AssignmentState.COMPLETED}:\n for unit in launcher.units:\n if unit.get_status() == AssignmentState.LAUNCHED:\n unit.set_db_status(AssignmentState.COMPLETED)\n time.sleep(0.1)\n self.assertEqual(launcher.launched_units.exceed_limit, False)\n curr_time = time.time()\n self.assertLessEqual(curr_time - start_time, MAX_WAIT_TIME_UNIT_LAUNCH)\n launcher.expire_units()\n self.tearDown()\n self.setUp()", "def test_check_cost():", "def test_25(self):\n assert 'False' == Api.requestBlock('test-25')", "def test_update_instance_limit1(self):\n pass", "def test_fail_on_rate_limit_exceeded(self):\n\n # setup 'short' limit for testing\n self.client.protocol.rate_limiter.rules = []\n self.client.protocol.rate_limiter.rules.append(\n XRateLimitRule(\n {\n \"short\": {\n \"usage\": 0,\n \"limit\": 600,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n \"long\": {\n \"usage\": 0,\n \"limit\": 30000,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n }\n )\n )\n\n # interact with api to get the limits\n self.client.get_athlete()\n\n # access the default rate limit rule\n rate_limit_rule = self.client.protocol.rate_limiter.rules[0]\n\n # get any of the rate limits, ex the 'short'\n limit = 
rate_limit_rule.rate_limits[\"short\"]\n\n # get current usage\n usage = limit[\"usage\"]\n print(\"last rate limit usage is {0}\".format(usage))\n\n # for testing purpses set the limit to usage\n limit[\"limit\"] = usage\n print(\"changing limit to {0}\".format(limit[\"limit\"]))\n\n # expect exception because of RateLimit has been\n # exceeded (or reached max)\n with self.assertRaises(exc.RateLimitExceeded):\n self.client.get_athlete()\n\n # request fired to early (less than 5 sec) causes timeout exception\n with self.assertRaises(exc.RateLimitTimeout):\n self.client.get_athlete()\n\n # once rate limit has exceeded wait until another request is possible\n # check if timeout has been set\n self.assertTrue(rate_limit_rule.limit_timeout > 0)\n print(\"limit timeout {0}\".format(rate_limit_rule.limit_timeout))\n\n # resetting limit\n # simulates Strava api - it would set the usage again to 0\n limit[\"limit\"] = 600\n print(\"resetting limit to {0}\".format(limit[\"limit\"]))\n\n try:\n # waiting until timeout expires\n time.sleep(5)\n\n # this time it should work again\n self.client.get_athlete()\n self.assertTrue(\"No exception raised\")\n except exc.RateLimitExceeded as e:\n self.fail(\"limiter raised RateLimitTimeout unexpectedly!\")\n\n # continue other tests with DefaultRateLimiter\n print(\"setting default rate limiter\")\n self.client.protocol.rate_limiter = DefaultRateLimiter()", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def test_get_speed_limit():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n assert get_speed_limit(center, radius, speed_limit) != center\n assert get_speed_limit(center, radius, speed_limit) != radius\n assert get_speed_limit(center, radius, speed_limit) == speed_limit", "def test_it(self):\n self.n += 1\n if self.n >= 5:\n self.fail(\"eventually failing\")", "def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def calculate(self, limit: int) -> None:\n raise NotImplementedError()", "def test_counter_proposal_offer(self):\n pass", "def test_communities_created_limit(self):\n self.login_as(\"ben\")\n for i in range(settings.QUIZZZ_CREATED_COMMUNITIES_LIMIT):\n response = self.client.post(self.url, {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(self.url, {\"name\": \"pushthelimit\"})\n self.assert_validation_failed(response, \n data=[\"You have reached the limit for communities created.\"])", "def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n self.assertEqual(self.es._total, total)", "def testsize(self):\n for size in range(5):\n a = AmuletAbility('Skepticism', size=size+1)\n self.assert_(str(size+1) in str(a))\n self.assertEqual(a.size, size+1)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def test_create_hyperflex_feature_limit_external(self):\n pass", "def test_30(self):\n assert 'False' == Api.requestBlock('test-30')", "def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg 
= ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None", "def STAND_LIMIT() -> int:\n return 15", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def test_module_example(self, tol):\n\n # Defines the wires and the graph on which MaxCut is being performed\n wires = range(3)\n graph = Graph([(0, 1), (1, 2), (2, 0)])\n\n # Defines the QAOA cost and mixer Hamiltonians\n cost_h, mixer_h = qaoa.maxcut(graph)\n\n # Defines a layer of the QAOA ansatz from the cost and mixer Hamiltonians\n def qaoa_layer(gamma, alpha):\n qaoa.cost_layer(gamma, cost_h)\n qaoa.mixer_layer(alpha, mixer_h)\n\n # Repeatedly applies layers of the QAOA ansatz\n def circuit(params, **kwargs):\n for w in wires:\n qml.Hadamard(wires=w)\n\n qml.layer(qaoa_layer, 2, params[0], params[1])\n\n # Defines the device and the QAOA cost function\n dev = qml.device(\"default.qubit\", wires=len(wires))\n cost_function = qml.ExpvalCost(circuit, cost_h, dev)\n\n res = cost_function([[1, 1], [1, 1]])\n expected = -1.8260274380964299\n\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! 
Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def test_05_vmcp(self):\r\n url = '/api/vmcp'\r\n self.check_limit(url, 'get', 'app')", "def test_huge_answers(self):\n self.init_player(\n '0', 'Welcome to Oppia!', 'do you know where the name \\'Oppia\\'')\n self.submit_and_compare(\n '0', '', 'In fact, the word Oppia means \\'learn\\'.')\n # This could potentially cause errors in stats_models when the answer\n # is persisted to the backend.\n self.submit_and_compare(\n 'a' * 1000500, 'Sorry, nope, we didn\\'t get it', '')", "def testsize(self):\n for size in range(5):\n AttributeAbility(size=size + 1)", "def test_composition_adds_to_100_percent(self):", "def test_change_provisioned_throughput_usual_case():", "def test_operate_resource_cap_max(self, on):\n\n if on is False:\n override = {}\n else:\n override = {\"techs.test_supply_plus.constraints.resource_cap_max\": 1e6}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n if on is False:\n assert check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert np.isinf(\n m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item()\n )\n elif on is True:\n assert not check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item() == 1e6", "def test_limit_cpu(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n self.assertEqual(json.loads(response.data['cpu']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['cpu'])\n # set an initial limit\n body = {'cpu': json.dumps({'web': '1024'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # set an additional value\n body = {'cpu': json.dumps({'worker': '512'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # unset a 
value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_max_members(self):\n self.login_as(self.USER)\n\n group_members = Membership.objects.filter(community_id=self.GROUP_ID).count()\n Community.objects.filter(pk=self.GROUP_ID).update(max_members=group_members)\n \n with self.assertNumQueries(5):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"This group has reached its member limit.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test(self):\n return test_throttle_method()", "def test_get_limit_4_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=4),\n gross_income.BASE_LIMIT\n )", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def test_40(self):\n assert 'False' == Api.requestBlock('test-40')", "def test_custom_per_project_upper_limit(self):\n data = {'payment_amount': '50.00'}\n account = Account(goal=8000, current=3001)\n form = DonationAmountForm(data=data, account=account)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertEqual('max_value', errors['payment_amount'][0].code)\n self.assertTrue('$49.99' in errors['payment_amount'][0].message)\n\n account.current = 3000\n form = DonationAmountForm(data=data, account=account)\n self.assertTrue(form.is_valid())", "def test_default_limit(self):\n telem = self.create_logs(self.user1, num=200)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(100, len(data))", "async def test_exectution_limit_throttle(coresys: CoreSys, loop: asyncio.BaseEventLoop):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n self.call = 0\n\n @Job(limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=1))\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n self.call += 1\n\n test = TestClass(coresys)\n\n await asyncio.gather(*[test.execute(0.1), test.execute(0.1), test.execute(0.1)])\n assert test.call == 1\n\n await asyncio.gather(*[test.execute(0.1)])\n assert test.call == 1", "def test_38(self):\n assert 'True' == Api.requestBlock('test-38')", "def test_10(self):\n assert 'False' == Api.requestBlock('test-10')", "def test_50(self):\n assert 'False' == Api.requestBlock('test-50', CustomFields=True)", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test_39(self):\n assert 'True' == Api.requestBlock('test-39')", "async def test_exectution_limit_throttle_wait(\n coresys: CoreSys, loop: asyncio.BaseEventLoop\n):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: 
CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n self.call = 0\n\n @Job(limit=JobExecutionLimit.THROTTLE_WAIT, throttle_period=timedelta(hours=1))\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n self.call += 1\n\n test = TestClass(coresys)\n\n await asyncio.gather(*[test.execute(0.1), test.execute(0.1), test.execute(0.1)])\n assert test.call == 1\n\n await asyncio.gather(*[test.execute(0.1)])\n assert test.call == 1", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test_get_damage_out_of_limit(self):\n self.veh.health = 0.24\n for op in self.veh.operators:\n op.health = 0.1\n self.veh.get_damage(0.5)\n self.assertEqual(self.veh.health, 0)\n self.assertEqual(self.veh.operators[0].health, 0.05)\n self.assertEqual(self.veh.operators[1].health, 0.05)", "def test_sad_purchasePlaces_12_places_max__step_by_step(self):\n\n club_index = self.add_fake_club(points=100)\n\n print(\"INIT:\", self.competitions, self.clubs)\n\n points = int(self.clubs[club_index][\"points\"])\n slots = int(self.competitions[0][\"numberOfPlaces\"])\n booked = 0\n\n num_actions = 12 + 1\n\n for i in range(1, num_actions + 1):\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n booked += 1\n print(i, \"\\n\", rv.data, rv.status_code, \"\\n\", server.booking)\n\n if i < num_actions - 1:\n cost = points - (self.cost_per_place * booked)\n assert rv.status_code in [200]\n assert str.encode(f\"Number of Places: {slots-booked}\") in rv.data\n assert str.encode(f\"Points available: {cost}\") in rv.data\n\n assert rv.status_code in [400]\n assert b\"You can&#39;t book more than 12 places per competition\" in rv.data", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else 
time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def test_20(self):\n assert 'False' == Api.requestBlock('test-20')", "def test_results_throttling(self, affiliate_items):\n call_count = 0\n\n def throttle_initial(*args, **kwargs):\n nonlocal call_count\n call_count += 1\n if call_count < 4:\n raise ThrottlingError()\n\n update_function = mock.Mock(side_effect=throttle_initial)\n batch_job = BatchJob(affiliate_items, update_function)\n\n with mock.patch('chiton.rack.affiliates.bulk.sleep') as delay_function:\n for result in batch_job.run():\n pass\n\n assert delay_function.call_count == 3\n assert update_function.call_count == 7", "def test_set_glass_capacity__with_valid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n numbers = [0, 1, 250, 0.0, 100.5]\n for number in numbers:\n glass.capacity = number\n assert glass.capacity == number", "def test_allowed_result_sizes(fxc, endpoint, size):\n fn_uuid = fxc.register_function(\n large_result_producer, endpoint, description=\"LargeResultProducer\"\n )\n task_id = fxc.run(\n size, # This is the current result size limit\n endpoint_id=endpoint,\n function_id=fn_uuid,\n )\n\n x = wait_for_task(fxc, task_id, walltime=10)\n assert len(x) == size, \"Result size does not match excepted size\"", "def test_31(self):\n assert 'True' == Api.requestBlock('test-31')", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def test_37(self):\n assert 'False' == Api.requestBlock('test-37')", "def test_request_limit_inner_larger(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2100, 'limit': 100})", "def test_limit_minus(self, mock):\n mock.configure_mock(**(self.config_payload(1, 1)))\n d = lf.lambda_handler(event=self.lambdaevent_minus, context=None)\n self.assertEqual(d, 0)\n mock.client.return_value.update_thing_shadow.assert_called_once_with(\n thingName=self.thingname,\n payload=lf.payload_put(lf.shadow_update_data))", "def test_some_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=2)", "def test_store_elements_count_exceed_default_limit(self, mocker):\n proxy = 
mocker.patch('saana_lib.ranking.RankingToDatabase.proxy')\n _compute = mocker.patch('saana_lib.ranking.Ranking.compute')\n\n _compute.return_value = dict((i, list(range(5))) for i in range(10))\n self.klass.store()\n assert proxy.call_count == 20", "def test_delete_hyperflex_feature_limit_internal(self):\n pass", "def test_limit_memory(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n self.assertEqual(json.loads(response.data['memory']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['memory'])\n # set an initial limit\n mem = {'web': '1G'}\n body = {'memory': json.dumps(mem)}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n memory = json.loads(response.data['memory'])\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # set an additional value\n body = {'memory': json.dumps({'worker': '512M'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_container_get_progress(self):\r\n progress = self.combinedoe_container.max_score()\r\n self.assertEqual(progress, None)", "def test_max(doctest):", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_29(self):\n assert 'True' == Api.requestBlock('test-29')", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum", "def 
test_query_train_jobs_with_exceeded_limit(self, client):\n params = dict(offset=0, limit=1000)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'", "def test_calculate_number_page_number_products_max_different(self):\n self.requestapi = RequestApi('snack', number_products_max=1000)\n self.requestapi.number_products = 1000\n result = self.requestapi.calculate_number_page()\n self.assertEqual(result, 10)", "def test_request_limit_overflow(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(200, 20)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 4100, 'limit': 20})", "def test_large_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('large')", "async def test_20() -> None:\n LOG.debug(\"Test post query (endMin > endMax)\")\n payload = {\n \"referenceName\": \"MT\",\n \"endMin\": 21,\n \"endMax\": 20,\n \"referenceBases\": \"T\",\n \"variantType\": \"SNP\",\n \"assemblyId\": \"GRCh38\",\n \"includeDatasetResponses\": \"HIT\",\n }\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is None, sys.exit(\"Query POST Endpoint Error!\")\n assert resp.status == 400, \"HTTP Status code error\"", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)" ]
[ "0.6602689", "0.65843254", "0.65533227", "0.63588923", "0.63528883", "0.6254358", "0.6215978", "0.6180673", "0.61653847", "0.61470395", "0.6145852", "0.6117478", "0.61015135", "0.6094627", "0.6062201", "0.5976109", "0.59308255", "0.59041405", "0.5900028", "0.5873644", "0.5867886", "0.5865996", "0.58463115", "0.5842891", "0.58405894", "0.58393717", "0.5837828", "0.58350384", "0.582851", "0.5822362", "0.58175164", "0.58133245", "0.5810695", "0.5802446", "0.5801542", "0.57908916", "0.5780881", "0.5771568", "0.5759367", "0.57346565", "0.57316184", "0.573126", "0.5726856", "0.5726727", "0.5720389", "0.5718015", "0.5717796", "0.5717048", "0.57033145", "0.5700407", "0.5697943", "0.56889445", "0.5684637", "0.5684625", "0.56800497", "0.56772095", "0.56692755", "0.56672", "0.5654847", "0.56434345", "0.56290895", "0.56156576", "0.56124204", "0.5611786", "0.5605865", "0.5600559", "0.55952954", "0.55941147", "0.55941147", "0.5590475", "0.5588605", "0.55882645", "0.55867964", "0.5584384", "0.5582503", "0.55793923", "0.55750656", "0.55678886", "0.55646724", "0.55591565", "0.5556988", "0.5553812", "0.55492514", "0.5547389", "0.5543689", "0.55432564", "0.5540811", "0.55375475", "0.5533645", "0.5526904", "0.5526203", "0.552563", "0.55201364", "0.5516596", "0.55151737", "0.5505842", "0.54968", "0.54933345", "0.5488313", "0.54865396" ]
0.7109697
0
Test limit with completed Aqueduct.
def test_limit_with_insula_and_aqueduct(self):
    d = TestDeck()

    g = test_setup.simple_two_player()

    p1, p2 = g.players

    self.assertEqual(g._clientele_limit(p1), 2)

    p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))
    p1.buildings.append(Building(d.insula, 'Rubble', complete=True))

    self.assertEqual(g._clientele_limit(p1), 8)

    p1.influence = ['Stone']

    self.assertEqual(g._clientele_limit(p1), 14)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_limit_with_aqueduct(self):\n d = TestDeck()\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n\n p1.buildings.append(Building(d.aqueduct, 'Concrete', complete=True))\n\n self.assertEqual(g._clientele_limit(p1), 4)\n\n p1.influence = ['Stone']\n\n self.assertEqual(g._clientele_limit(p1), 10)", "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def calculate(self, limit):\r\n pass", "def test_update_instance_limit(self):\n pass", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_set_project_limits(self):\n pass", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def calculate(self, limit):\n pass", "def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def test_initial_limit(self):\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n self.assertEqual(g._clientele_limit(p2), 2)", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an 
integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def __init__(self, reason, lim=0):\n self.successes = 0\n self.tests = 0\n self.reason = reason\n self.limit = lim", "def test_launch_assignments_with_concurrent_unit_cap(self):\n cap_values = [1, 2, 3, 4, 5]\n for max_num_units in cap_values:\n mock_data_array = self.get_mock_assignment_data_array()\n launcher = TaskLauncher(\n self.db,\n self.task_run,\n mock_data_array,\n max_num_concurrent_units=max_num_units,\n )\n launcher.launched_units = LimitedDict(launcher.max_num_concurrent_units)\n launcher.create_assignments()\n launcher.launch_units(\"dummy-url:3000\")\n\n start_time = time.time()\n while set([u.get_status() for u in launcher.units]) != {AssignmentState.COMPLETED}:\n for unit in launcher.units:\n if unit.get_status() == AssignmentState.LAUNCHED:\n unit.set_db_status(AssignmentState.COMPLETED)\n time.sleep(0.1)\n self.assertEqual(launcher.launched_units.exceed_limit, False)\n curr_time = time.time()\n self.assertLessEqual(curr_time - start_time, MAX_WAIT_TIME_UNIT_LAUNCH)\n launcher.expire_units()\n self.tearDown()\n self.setUp()", "def test_check_cost():", "def test_25(self):\n assert 'False' == Api.requestBlock('test-25')", "def test_update_instance_limit1(self):\n pass", "def test_fail_on_rate_limit_exceeded(self):\n\n # setup 'short' limit for testing\n self.client.protocol.rate_limiter.rules = []\n self.client.protocol.rate_limiter.rules.append(\n XRateLimitRule(\n {\n \"short\": {\n \"usage\": 0,\n \"limit\": 600,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n \"long\": {\n \"usage\": 0,\n \"limit\": 30000,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n }\n )\n )\n\n # interact with api to get the limits\n self.client.get_athlete()\n\n # access the default rate limit rule\n rate_limit_rule = self.client.protocol.rate_limiter.rules[0]\n\n # get any of the rate limits, ex the 'short'\n limit = rate_limit_rule.rate_limits[\"short\"]\n\n # get current usage\n usage = limit[\"usage\"]\n print(\"last 
rate limit usage is {0}\".format(usage))\n\n # for testing purpses set the limit to usage\n limit[\"limit\"] = usage\n print(\"changing limit to {0}\".format(limit[\"limit\"]))\n\n # expect exception because of RateLimit has been\n # exceeded (or reached max)\n with self.assertRaises(exc.RateLimitExceeded):\n self.client.get_athlete()\n\n # request fired to early (less than 5 sec) causes timeout exception\n with self.assertRaises(exc.RateLimitTimeout):\n self.client.get_athlete()\n\n # once rate limit has exceeded wait until another request is possible\n # check if timeout has been set\n self.assertTrue(rate_limit_rule.limit_timeout > 0)\n print(\"limit timeout {0}\".format(rate_limit_rule.limit_timeout))\n\n # resetting limit\n # simulates Strava api - it would set the usage again to 0\n limit[\"limit\"] = 600\n print(\"resetting limit to {0}\".format(limit[\"limit\"]))\n\n try:\n # waiting until timeout expires\n time.sleep(5)\n\n # this time it should work again\n self.client.get_athlete()\n self.assertTrue(\"No exception raised\")\n except exc.RateLimitExceeded as e:\n self.fail(\"limiter raised RateLimitTimeout unexpectedly!\")\n\n # continue other tests with DefaultRateLimiter\n print(\"setting default rate limiter\")\n self.client.protocol.rate_limiter = DefaultRateLimiter()", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def test_get_speed_limit():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n assert get_speed_limit(center, radius, speed_limit) != center\n assert get_speed_limit(center, radius, speed_limit) != radius\n assert get_speed_limit(center, radius, speed_limit) == speed_limit", "def test_it(self):\n self.n += 1\n if self.n >= 5:\n self.fail(\"eventually failing\")", "def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def calculate(self, limit: int) -> None:\n raise NotImplementedError()", "def test_counter_proposal_offer(self):\n pass", "def test_communities_created_limit(self):\n self.login_as(\"ben\")\n for i in range(settings.QUIZZZ_CREATED_COMMUNITIES_LIMIT):\n response = self.client.post(self.url, {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(self.url, {\"name\": \"pushthelimit\"})\n self.assert_validation_failed(response, \n data=[\"You have reached the limit for communities created.\"])", "def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n self.assertEqual(self.es._total, total)", "def testsize(self):\n for size in range(5):\n a = AmuletAbility('Skepticism', size=size+1)\n self.assert_(str(size+1) in str(a))\n self.assertEqual(a.size, size+1)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def test_create_hyperflex_feature_limit_external(self):\n pass", "def test_30(self):\n assert 'False' == Api.requestBlock('test-30')", "def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == 
INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None", "def STAND_LIMIT() -> int:\n return 15", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def test_module_example(self, tol):\n\n # Defines the wires and the graph on which MaxCut is being performed\n wires = range(3)\n graph = Graph([(0, 1), (1, 2), (2, 0)])\n\n # Defines the QAOA cost and mixer Hamiltonians\n cost_h, mixer_h = qaoa.maxcut(graph)\n\n # Defines a layer of the QAOA ansatz from the cost and mixer Hamiltonians\n def qaoa_layer(gamma, alpha):\n qaoa.cost_layer(gamma, cost_h)\n qaoa.mixer_layer(alpha, mixer_h)\n\n # Repeatedly applies layers of the QAOA ansatz\n def circuit(params, **kwargs):\n for w in wires:\n qml.Hadamard(wires=w)\n\n qml.layer(qaoa_layer, 2, params[0], params[1])\n\n # Defines the device and the QAOA cost function\n dev = qml.device(\"default.qubit\", wires=len(wires))\n cost_function = qml.ExpvalCost(circuit, cost_h, dev)\n\n res = cost_function([[1, 1], [1, 1]])\n expected = -1.8260274380964299\n\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! 
Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def test_05_vmcp(self):\r\n url = '/api/vmcp'\r\n self.check_limit(url, 'get', 'app')", "def test_huge_answers(self):\n self.init_player(\n '0', 'Welcome to Oppia!', 'do you know where the name \\'Oppia\\'')\n self.submit_and_compare(\n '0', '', 'In fact, the word Oppia means \\'learn\\'.')\n # This could potentially cause errors in stats_models when the answer\n # is persisted to the backend.\n self.submit_and_compare(\n 'a' * 1000500, 'Sorry, nope, we didn\\'t get it', '')", "def test_composition_adds_to_100_percent(self):", "def testsize(self):\n for size in range(5):\n AttributeAbility(size=size + 1)", "def test_change_provisioned_throughput_usual_case():", "def test_operate_resource_cap_max(self, on):\n\n if on is False:\n override = {}\n else:\n override = {\"techs.test_supply_plus.constraints.resource_cap_max\": 1e6}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n if on is False:\n assert check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert np.isinf(\n m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item()\n )\n elif on is True:\n assert not check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item() == 1e6", "def test_limit_cpu(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n self.assertEqual(json.loads(response.data['cpu']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['cpu'])\n # set an initial limit\n body = {'cpu': json.dumps({'web': '1024'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # set an additional value\n body = {'cpu': json.dumps({'worker': '512'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # unset a 
value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_max_members(self):\n self.login_as(self.USER)\n\n group_members = Membership.objects.filter(community_id=self.GROUP_ID).count()\n Community.objects.filter(pk=self.GROUP_ID).update(max_members=group_members)\n \n with self.assertNumQueries(5):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"This group has reached its member limit.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test(self):\n return test_throttle_method()", "def test_get_limit_4_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=4),\n gross_income.BASE_LIMIT\n )", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def test_40(self):\n assert 'False' == Api.requestBlock('test-40')", "def test_custom_per_project_upper_limit(self):\n data = {'payment_amount': '50.00'}\n account = Account(goal=8000, current=3001)\n form = DonationAmountForm(data=data, account=account)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertEqual('max_value', errors['payment_amount'][0].code)\n self.assertTrue('$49.99' in errors['payment_amount'][0].message)\n\n account.current = 3000\n form = DonationAmountForm(data=data, account=account)\n self.assertTrue(form.is_valid())", "def test_default_limit(self):\n telem = self.create_logs(self.user1, num=200)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(100, len(data))", "async def test_exectution_limit_throttle(coresys: CoreSys, loop: asyncio.BaseEventLoop):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n self.call = 0\n\n @Job(limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=1))\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n self.call += 1\n\n test = TestClass(coresys)\n\n await asyncio.gather(*[test.execute(0.1), test.execute(0.1), test.execute(0.1)])\n assert test.call == 1\n\n await asyncio.gather(*[test.execute(0.1)])\n assert test.call == 1", "def test_38(self):\n assert 'True' == Api.requestBlock('test-38')", "def test_10(self):\n assert 'False' == Api.requestBlock('test-10')", "def test_50(self):\n assert 'False' == Api.requestBlock('test-50', CustomFields=True)", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test_39(self):\n assert 'True' == Api.requestBlock('test-39')", "async def test_exectution_limit_throttle_wait(\n coresys: CoreSys, loop: asyncio.BaseEventLoop\n):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: 
CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n self.call = 0\n\n @Job(limit=JobExecutionLimit.THROTTLE_WAIT, throttle_period=timedelta(hours=1))\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n self.call += 1\n\n test = TestClass(coresys)\n\n await asyncio.gather(*[test.execute(0.1), test.execute(0.1), test.execute(0.1)])\n assert test.call == 1\n\n await asyncio.gather(*[test.execute(0.1)])\n assert test.call == 1", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test_get_damage_out_of_limit(self):\n self.veh.health = 0.24\n for op in self.veh.operators:\n op.health = 0.1\n self.veh.get_damage(0.5)\n self.assertEqual(self.veh.health, 0)\n self.assertEqual(self.veh.operators[0].health, 0.05)\n self.assertEqual(self.veh.operators[1].health, 0.05)", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def test_20(self):\n assert 'False' == Api.requestBlock('test-20')", "def test_sad_purchasePlaces_12_places_max__step_by_step(self):\n\n club_index = self.add_fake_club(points=100)\n\n print(\"INIT:\", self.competitions, self.clubs)\n\n points = int(self.clubs[club_index][\"points\"])\n slots = int(self.competitions[0][\"numberOfPlaces\"])\n booked = 0\n\n num_actions = 12 + 1\n\n for i in range(1, num_actions + 1):\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n booked += 1\n print(i, \"\\n\", rv.data, rv.status_code, \"\\n\", server.booking)\n\n if i < num_actions - 1:\n cost = points - (self.cost_per_place * booked)\n assert rv.status_code in [200]\n assert str.encode(f\"Number 
of Places: {slots-booked}\") in rv.data\n assert str.encode(f\"Points available: {cost}\") in rv.data\n\n assert rv.status_code in [400]\n assert b\"You can&#39;t book more than 12 places per competition\" in rv.data", "def test_set_glass_capacity__with_valid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n numbers = [0, 1, 250, 0.0, 100.5]\n for number in numbers:\n glass.capacity = number\n assert glass.capacity == number", "def test_results_throttling(self, affiliate_items):\n call_count = 0\n\n def throttle_initial(*args, **kwargs):\n nonlocal call_count\n call_count += 1\n if call_count < 4:\n raise ThrottlingError()\n\n update_function = mock.Mock(side_effect=throttle_initial)\n batch_job = BatchJob(affiliate_items, update_function)\n\n with mock.patch('chiton.rack.affiliates.bulk.sleep') as delay_function:\n for result in batch_job.run():\n pass\n\n assert delay_function.call_count == 3\n assert update_function.call_count == 7", "def test_allowed_result_sizes(fxc, endpoint, size):\n fn_uuid = fxc.register_function(\n large_result_producer, endpoint, description=\"LargeResultProducer\"\n )\n task_id = fxc.run(\n size, # This is the current result size limit\n endpoint_id=endpoint,\n function_id=fn_uuid,\n )\n\n x = wait_for_task(fxc, task_id, walltime=10)\n assert len(x) == size, \"Result size does not match excepted size\"", "def test_31(self):\n assert 'True' == Api.requestBlock('test-31')", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def test_37(self):\n assert 'False' == Api.requestBlock('test-37')", "def test_limit_minus(self, mock):\n mock.configure_mock(**(self.config_payload(1, 1)))\n d = lf.lambda_handler(event=self.lambdaevent_minus, context=None)\n self.assertEqual(d, 0)\n mock.client.return_value.update_thing_shadow.assert_called_once_with(\n thingName=self.thingname,\n payload=lf.payload_put(lf.shadow_update_data))", "def test_request_limit_inner_larger(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2100, 'limit': 100})", "def test_some_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=2)", "def test_store_elements_count_exceed_default_limit(self, mocker):\n proxy = 
mocker.patch('saana_lib.ranking.RankingToDatabase.proxy')\n _compute = mocker.patch('saana_lib.ranking.Ranking.compute')\n\n _compute.return_value = dict((i, list(range(5))) for i in range(10))\n self.klass.store()\n assert proxy.call_count == 20", "def test_delete_hyperflex_feature_limit_internal(self):\n pass", "def test_limit_memory(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n self.assertEqual(json.loads(response.data['memory']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['memory'])\n # set an initial limit\n mem = {'web': '1G'}\n body = {'memory': json.dumps(mem)}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n memory = json.loads(response.data['memory'])\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # set an additional value\n body = {'memory': json.dumps({'worker': '512M'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_container_get_progress(self):\r\n progress = self.combinedoe_container.max_score()\r\n self.assertEqual(progress, None)", "def test_max(doctest):", "def test_29(self):\n assert 'True' == Api.requestBlock('test-29')", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum", "def 
test_query_train_jobs_with_exceeded_limit(self, client):\n params = dict(offset=0, limit=1000)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'", "def test_calculate_number_page_number_products_max_different(self):\n self.requestapi = RequestApi('snack', number_products_max=1000)\n self.requestapi.number_products = 1000\n result = self.requestapi.calculate_number_page()\n self.assertEqual(result, 10)", "def test_request_limit_overflow(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(200, 20)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 4100, 'limit': 20})", "def test_large_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('large')", "async def test_20() -> None:\n LOG.debug(\"Test post query (endMin > endMax)\")\n payload = {\n \"referenceName\": \"MT\",\n \"endMin\": 21,\n \"endMax\": 20,\n \"referenceBases\": \"T\",\n \"variantType\": \"SNP\",\n \"assemblyId\": \"GRCh38\",\n \"includeDatasetResponses\": \"HIT\",\n }\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is None, sys.exit(\"Query POST Endpoint Error!\")\n assert resp.status == 400, \"HTTP Status code error\"", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)" ]
[ "0.7108374", "0.6605307", "0.6584849", "0.655564", "0.6354267", "0.62519914", "0.6215818", "0.6181438", "0.61679924", "0.61482066", "0.61449", "0.6115197", "0.6100592", "0.60953647", "0.60620195", "0.5975527", "0.5931888", "0.59042233", "0.5899593", "0.5873513", "0.58678395", "0.58672076", "0.58483773", "0.5844977", "0.5841321", "0.5839646", "0.5837378", "0.58345795", "0.58299065", "0.582432", "0.5815384", "0.5810892", "0.5808957", "0.58038276", "0.5802125", "0.5790945", "0.57814646", "0.5773077", "0.57601905", "0.5737488", "0.5731635", "0.57295936", "0.572927", "0.57258123", "0.5720733", "0.57184494", "0.57179177", "0.57167965", "0.57057846", "0.5702611", "0.56982136", "0.56881124", "0.568488", "0.5684495", "0.56792694", "0.56776685", "0.56692386", "0.5667639", "0.56540304", "0.5644364", "0.562967", "0.5615222", "0.56136036", "0.5612479", "0.56052065", "0.56002295", "0.5597126", "0.55961657", "0.55961657", "0.55912924", "0.55894303", "0.55876416", "0.5587147", "0.55838025", "0.55834424", "0.55777484", "0.5576049", "0.55686796", "0.5565649", "0.5561297", "0.5555579", "0.5554718", "0.5547896", "0.5547445", "0.55450255", "0.5543724", "0.5540871", "0.55367446", "0.553337", "0.5530311", "0.55266213", "0.5524592", "0.5520397", "0.55169505", "0.55149496", "0.5504245", "0.5497897", "0.54936725", "0.54898435", "0.54853636" ]
0.63586307
4
fetch from local json post the first tweet in the list then record that the tweet was tweeted
def tweet(self): self.__refresh_local_tweets() if not self.tweets: return tweet_obj = self.tweets[0] # upload picture media_id = self.__upload_media(tweet_obj["img"]) # tweet with text, and image if not media_id: return self.__post_status(tweet_obj["text"], media_id) self.tweets.remove(tweet_obj) self.tweeted.append(tweet_obj) self.__update_local_tweets()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n output_tweet[k] = v\n else:\n if debug: print(' skipping this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 
0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def get_tweets():\n broken_json = read_tweets()\n #\n # Remove the last comma and wrap in a json list\n #\n parsed = json.loads('[%s]' % broken_json[:-1])\n return parsed", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def tweet(self):\n try: \n return self._parsed_tweet\n except:\n if self.item_json:\n self._parsed_tweet = json.loads(self.item_json)\n else:\n self._parsed_tweet = {}\n return self._parsed_tweet", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n 
print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")", "def sendTweets(self):\n\n if self.__status_type == 'link':\n\n for index, item in self.list.iterrows():\n\n title = item['title']\n url = item['url']\n message = (url + \" \" + title)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'single_msg':\n\n message = (self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'reply':\n\n for index, item in self.list.iterrows():\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n try:\n if self.__image == None:\n self.__api.update_status(status=message, in_reply_to_status_id=item['id'])\n else:\n self.__api.update_with_media(filename=self.__image, status=message,\n in_reply_to_status_id=item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"reply status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'at':\n\n for index, item in self.list.iterrows():\n\n try:\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n except KeyError:\n print(\"List does not include necessary 
column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'rt':\n\n for index, item in self.list.iterrows():\n try:\n self.__api.retweet(item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n else:\n print(\"Invalid status type. Change status type through configure_tweet method.\")\n\n return", "def flatten_tweets(tweets_json):\r\n tweets_list = []\r\n \r\n # Iterate through each tweet\r\n for tweet in tweets_json:\r\n tweet_obj = json.loads(tweet)\r\n \r\n # Store the user screen name in 'user-screen_name'\r\n tweet_obj['user-screen_name'] = tweet_obj['user']['screen_name']\r\n \r\n # Check if this is a 140+ character tweet\r\n if 'extended_tweet' in tweet_obj:\r\n # Store the extended tweet text in 'extended_tweet-full_text'\r\n tweet_obj['extended_tweet-full_text'] = tweet_obj['extended_tweet']['full_text']\r\n \r\n if 'retweeted_status' in tweet_obj:\r\n # Store the retweet user screen name in 'retweeted_status-user-screen_name'\r\n tweet_obj['retweeted_status-user-screen_name'] = tweet_obj['retweeted_status']['user']['screen_name']\r\n\r\n # Store the retweet text in 'retweeted_status-text'\r\n tweet_obj['retweeted_status-text'] = tweet_obj['retweeted_status']['text']\r\n \r\n tweets_list.append(tweet_obj)\r\n return tweets_list", "def on_data(self, data):\n status = json.loads(data)\n # increase the counter\n self.counter += 1\n\n retweet, rt_user, tweet_text, created_time = organize_tweet(status) \n\n if status['user']['id_str'] in infos.twitterids:\n\n who = status['user']['id_str']\n\n try:\n replied_to = status['in_reply_to_screen_name']\n except:\n replied_to = 'NULL'\n \n else:\n \n who = status['user']['screen_name']\n \n try:\n replied_to = infos.twitterids[status['in_reply_to_user_id_str']]\n except:\n replied_to = 'NULL'\n \n tweet = {\n \n 'id': status['user']['id_str'], #status.user.id_str,\n 'who': who,\n 'replied_to': replied_to,\n 'retweeted': retweet, #status['retweeted'], #status.retweeted,\n 'retweeted_from': rt_user,\n 'text': tweet_text,\n 'timestamp' : created_time\n }\n\n #write to mongoDB here\n collection.insert_one(tweet)\n print(f'New tweet arrived: {tweet[\"text\"]}')\n\n\n # check if we have enough tweets collected\n if self.max_tweets == self.counter:\n # reset the counter\n self.counter=0\n # return False to stop the listener\n return False", "def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')", "def on_data(self, data):\n\n t = json.loads(data) \n tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n 
}\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def fetch_tweets(self, screen_name, count):\n return {}", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def merge_tweets_v3():\n filename_list = []\n for filename in os.listdir('.'):\n if filename.startswith(\"trecis\") and filename.endswith(\".json\"):\n filename_list.append(filename)\n filename_list = sorted(filename_list)\n\n formatted_tweet_list_train = []\n formatted_tweet_list_test = []\n count_inconsistent = 0\n for filename in filename_list:\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n content = json.loads(line)\n formatted_content = json.loads(content['allProperties']['srcjson'])\n formatted_content['full_text'] = formatted_content['text']\n\n if 'entities' not in formatted_content:\n count_inconsistent += 1\n entities = dict()\n entities[\"symbols\"] = formatted_content['symbolEntities']\n entities[\"urls\"] = formatted_content['urlEntities']\n entities[\"hashtags\"] = formatted_content['hashtagEntities']\n entities[\"user_mentions\"] = formatted_content['userMentionEntities']\n entities[\"media\"] = formatted_content['mediaEntities']\n # To make the \"start\" and \"end\" API consistent with others\n for entity_name in [\"hashtags\", \"user_mentions\", \"urls\"]:\n for iEntity, entity in enumerate(entities[entity_name]):\n entity['indices'] = [entity['start'], entity['end']]\n entities[entity_name][iEntity] = entity\n formatted_content['entities'] = entities\n # Some other API convert\n formatted_content['retweet_count'] = formatted_content['retweetCount']\n formatted_content['favorite_count'] = formatted_content['favoriteCount']\n formatted_content['user']['favourites_count'] = formatted_content['user']['favouritesCount']\n formatted_content['user']['followers_count'] = formatted_content['user']['followersCount']\n formatted_content['user']['statuses_count'] = formatted_content['user']['statusesCount']\n formatted_content['user']['geo_enabled'] = formatted_content['user']['isGeoEnabled']\n formatted_content['user']['verified'] = formatted_content['user']['isVerified']\n formatted_content['user']['listed_count'] = formatted_content['user']['listedCount']\n formatted_content['user']['friends_count'] = formatted_content['user']['friendsCount']\n\n if filename.startswith(\"trecis2019-B\"):\n formatted_tweet_list_test.append(formatted_content)\n else:\n formatted_tweet_list_train.append(formatted_content)\n\n if count_inconsistent > 0:\n print(\"There are {} tweets have inconsistent API about the entities, \"\n \"and they are automatically converted.\".format(count_inconsistent))\n print(\"There are {0} tweets for training and {1} tweets for testing\".format(\n len(formatted_tweet_list_train), len(formatted_tweet_list_test)))\n\n outfile = '../data/all-tweets.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_train:\n fout.write(json.dumps(tweet) + '\\n')\n\n outfile = '../data/all-tweets-2019.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_test:\n fout.write(json.dumps(tweet) + '\\n')", "def get_tweets_from_json_file(self, filename, **kwargs):\n # empty list to store parsed tweets\n tweets = []\n if not os.path.isfile(filename):\n 
print('Could not find file: ', filename)\n return -1\n\n # get all handles from research subject\n handles = []\n for record in research.get_values(**kwargs):\n handles.append(record['handle'])\n\n with open(filename, mode='r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n\n # parsing tweets one by one\n for account in data:\n print(account)\n if account['handle'] in handles:\n for record in account['tweets']:\n tweet = record[0].strip()\n timestamp = record[1]\n print(tweet)\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.strip()\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.strip())\n\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def save_tweet(self, twitter) -> None:\n if isinstance(twitter, dict):\n json_data = twitter\n else:\n json_data = json.loads(twitter)\n\n try:\n breakpoint()\n self.db.tweets.find_one_and_update(\n {'id_str': json_data['id_str']},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n upsert=True,\n )\n except Exception as e:\n log.error(e)", "def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def merge_tweets_v2():\n 
filename_list = []\n for filename in os.listdir('.'):\n if filename.startswith(\"trecis\") and filename.endswith(\".json\") and not filename.startswith(\"trecis2019-B\"):\n filename_list.append(filename)\n filename_list = sorted(filename_list)\n\n formatted_tweet_list = []\n formatted_tweet_list_2019 = []\n count_inconsistent = 0\n for filename in filename_list:\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n content = json.loads(line)\n formatted_content = json.loads(content['allProperties']['srcjson'])\n formatted_content['full_text'] = formatted_content['text']\n\n if 'entities' not in formatted_content:\n count_inconsistent += 1\n entities = dict()\n entities[\"symbols\"] = formatted_content['symbolEntities']\n entities[\"urls\"] = formatted_content['urlEntities']\n entities[\"hashtags\"] = formatted_content['hashtagEntities']\n entities[\"user_mentions\"] = formatted_content['userMentionEntities']\n entities[\"media\"] = formatted_content['mediaEntities']\n # To make the \"start\" and \"end\" API consistent with others\n for entity_name in [\"hashtags\", \"user_mentions\", \"urls\"]:\n for iEntity, entity in enumerate(entities[entity_name]):\n entity['indices'] = [entity['start'], entity['end']]\n entities[entity_name][iEntity] = entity\n formatted_content['entities'] = entities\n # Some other API convert\n formatted_content['retweet_count'] = formatted_content['retweetCount']\n formatted_content['favorite_count'] = formatted_content['favoriteCount']\n formatted_content['user']['favourites_count'] = formatted_content['user']['favouritesCount']\n formatted_content['user']['followers_count'] = formatted_content['user']['followersCount']\n formatted_content['user']['statuses_count'] = formatted_content['user']['statusesCount']\n formatted_content['user']['geo_enabled'] = formatted_content['user']['isGeoEnabled']\n formatted_content['user']['verified'] = formatted_content['user']['isVerified']\n formatted_content['user']['listed_count'] = formatted_content['user']['listedCount']\n formatted_content['user']['friends_count'] = formatted_content['user']['friendsCount']\n\n if filename.startswith(\"trecis2019\"):\n formatted_tweet_list_2019.append(formatted_content)\n else:\n formatted_tweet_list.append(formatted_content)\n\n if count_inconsistent > 0:\n print(\"There are {} tweets have inconsistent API about the entities, \"\n \"and they are automatically converted\".format(count_inconsistent))\n print(\"There are {0} tweets for 2018 and {1} tweets for 2019\".format(\n len(formatted_tweet_list), len(formatted_tweet_list_2019)))\n\n outfile = '../data/all-tweets.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list:\n fout.write(json.dumps(tweet) + '\\n')\n\n outfile = '../data/all-tweets-2019.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_2019:\n fout.write(json.dumps(tweet) + '\\n')", "def extract_tweets(path):\n dict_list = []\n\n for line in open(path):\n loaded = json.loads(line)\n dict_list.append(loaded)\n\n text = \"\"\n for item in dict_list:\n '''\n try:\n tweet = item[\"text\"]\n #filter(lambda x: x in set(string.printable), tweet)\n text += text\n except UnicodeEncodeError:\n pass\n '''\n tweet = str(item[\"text\"].encode('ascii', 'ignore'))\n #filter(lambda x: x in set(string.printable), tweet)\n text += tweet\n\n return text", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = 
edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def read_tweets(data_path):\n\n json_list = []\n with open(data_path, 'r') as json_file_:\n for line in json_file_:\n json_file = json.dumps(ast.literal_eval(line))\n json_list += json_file,\n \n header = ['tweet_id', 'tweet', 'date', 'lang_twitter', 'retweeted', 'user_id']\n required_cols = itemgetter(*header)\n\n #with open(data_path) as f_input, open('out/'+data_path[:-4]+'.csv', 'w', newline='') as f_output:\n output = data_path.split(\"/\")[-1]\n output = 'out/{}.csv'.format(output[:-4])\n with open(output, 'w', newline='') as f_output:\n csv_output = csv.writer(f_output)\n csv_output.writerow(header)\n for row in json_list:\n if row.strip():\n tweet = json.loads(row)\n tweet['tweet_id'] = tweet['id_str']\n tweet['tweet'] = tweet['extended_tweet']['full_text'] if (\"extended_tweet\" in tweet or \"full_text\" in tweet) and bool(tweet[\"truncated\"]) else tweet['text']\n tweet['date'] = tweet['created_at']\n tweet['lang_twitter'] = tweet['lang']\n tweet['user_id'] = tweet['user']['id_str']\n csv_output.writerow(required_cols(tweet))\n \n return True", "def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets", "def twitter(n=1):\n tweet = get_tweet(TWITTER_NAME, n)\n tweet_info = {\n 'text': tweet.text,\n 'date': tweet.created_at.strftime('%A, %B %d'),\n 'time': tweet.created_at.strftime('%H:%M'),\n 'latest': (int(n) == 1), # True if n is one, else False.\n }\n return jsonify(tweet_info)", "def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? 
or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def insert_tweet(status):\n status['replies'] = []\n return db.tweets.insert(status)", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def Pull_Relevant(flist, DateRange, TermList, OutFile):\n\n TweetCount=0\n for Filename in flist:\n Tweetset_Current = \"Start\"\n print(Filename)\n input_file = open(Filename, 'r')\n raw_batch = islice(input_file, None)\n with open(OutFile, 'a') as f: # append the batch, and close file each time.\n for current_line in raw_batch:\n tweet_item = json.loads(current_line)\n if RelevantTweet(tweet_item, TermList, DateRange):\n f.write(json.dumps(tweet_item))\n f.write('\\n')\n TweetCount=TweetCount+1\n return(TweetCount)", "def process_json_line(jline):\n if jline: # filter out keep-alive new lines\n text = jline\n try:\n tweet = json.loads(text)\n except BaseException as e:\n print(e)\n print(\"Unable to load text as json:\")\n print(text)\n return # nothing to process after all\n if filter_tweet(tweet) and examine_user_timeline(tweet['user']['screen_name']) and \\\n ('retweeted_status' not in tweet or ('retweeted_status' in tweet and \\\n examine_user_timeline(tweet['retweeted_status']['user']['screen_name']))):\n print(\"Matched tweet: https://twitter.com/%s/status/%s\" % (\n tweet['user']['screen_name'], tweet['id_str']))\n print(\"@%s (%s) : %s\" % (\n tweet['user']['screen_name'],\n tweet['source'],\n tweet['text']))\n retweet(tweet['id_str'])\n if 'warning' in tweet:\n print(\"==== WARNING !!! ====\")\n print(json.dumps(tweet, indent=4)) #TODO: fix ujson not supporting indent=\n print(\"==== WARNING !!! 
====\")", "def __update_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'w')\n f_tweeted = open(f'{TWEETED}', 'w')\n try:\n f_tweets.write(json.dumps(self.tweets, sort_keys=True, indent=4))\n f_tweeted.write(json.dumps(self.tweeted, sort_keys=True, indent=4))\n finally:\n f_tweets.close()\n f_tweeted.close()", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def get_tweets(self):\r\n return self.tweets", "def extract_tweet_info_from_local_file(tweet_js_path, max_extract=None, get_cleaned_df=False, item_to_extract=['id_str', 'created_at'], output_path=None, begin=None, end=None):\r\n\r\n if max_extract is None: # extract all tweets by default\r\n with open(tweet_js_path, encoding='utf-8') as f:\r\n all_data = f.read()\r\n max_extract = all_data.count('\\\"tweet\\\"')\r\n\r\n final_file_name = 'parsed_tweets_df.csv' if get_cleaned_df else 'parsed_tweets.json'\r\n if output_path is None:\r\n output_path = final_file_name\r\n elif '.csv' not in output_path:\r\n output_path = os.path.join(output_path, final_file_name)\r\n\r\n if os.path.isfile(final_file_name):\r\n print(\"Found {}, assumed already parsed. Exiting\".format(final_file_name))\r\n return\r\n else:\r\n # do the actual extraction\r\n extracted_info = []\r\n with open(tweet_js_path, encoding='utf-8') as f:\r\n res = get_tweet_object_from_tweet_js(f, max_extract)\r\n\r\n print(\"Extracted {} tweet objects.\".format(len(res)))\r\n\r\n begin_mark = int(begin) if begin is not None else 0\r\n end_mark = int(end) if end is not None else len(res)\r\n\r\n for obj in res[begin_mark:end_mark]:\r\n tmp = []\r\n json_obj = json.loads(obj)\r\n if get_cleaned_df:\r\n for item in item_to_extract: # assume that item is a valid attribute of a status object\r\n tmp.append(json_obj[item])\r\n extracted_info.append(tmp)\r\n else: # want the actual Tweet object\r\n extracted_info.append(json_obj)\r\n\r\n if get_cleaned_df:\r\n formatted_df = pd.DataFrame(extracted_info, columns=item_to_extract)\r\n formatted_df.to_csv(output_path, index=False)\r\n else:\r\n with open(output_path, 'w', encoding='utf8') as file:\r\n file.write(json.dumps(extracted_info, sort_keys=True, indent=4, ensure_ascii=False))", "def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n 
tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)", "def get_tweets(filename):\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True) # use parameter lines=True to read the file as a json object per line\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print(\"Something went wrong parsing the file \" + filename)", "def process_tweet(json_data):\n text = json_data.get('text')\n\n # Strip URLs.\n for url in json_data.get('entities').get('urls', []):\n text = text.replace(url.get('url', ''), 'http')\n\n # Tokenize text.\n tokens = twitter_tokenizer.tokenize(text)\n\n # Remove punctuation and stopwords.\n tokens = [x for x in tokens if x not in punctuation_set and x not in stopwords_set]\n\n # Stem the tokens.\n if toggles['stem_tokens']:\n tokens = [stemmer.stem(x) for x in tokens]\n\n result = {}\n result['stemmed'] = tokens\n result['user'] = json_data.get('user')\n\n return result", "def dump_list_of_rts():\n rtlist = get_list_of_rts()\n if rtlist:\n for tweet in rtlist:\n print(' # Extracted from https://twitter.com/%s/status/%s' %\n (tweet['retweeted_status']['user']['screen_name'],\n tweet['retweeted_status']['id_str']))\n print(' (ur\"\"\"%s\"\"\", False),' %\n tweet['retweeted_status']['text'])", "def load_tweets(fname):\n tweets = []\n for line in open(fname):\n tweets.append(json.loads(line))\n return tweets", "def predict_the_future(selfie, next_id):\n if next_id:\n next_link = 'https://twitter.com/%s/status/%s' % (selfie.username, next_id)\n message = 'BEHOLD! A link to this very tweet! 
%s' % next_link\n elif not selfie.tweets:\n message = 'First?'\n else:\n message = 'Second?'\n\n tweet = selfie.api.statuses.update(status=message)\n\n # Delete the tweet if it was a throwaway or it didn't work.\n if next_id is None or tweet['id'] != str(next_id):\n selfie.garbage.append(tweet)\n\n return tweet", "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def on_status(self, data):\n print data\n print data.text\n if not data.text:\n return\n #features = trainer.get_feature_vector(data._json)\n if self.classifier and not self.classifier.predict(features):\n return\n\n print \"Tweet arriving\"\n\n hashtags, user_mentions, urls = join_entity(data._json)\n\n user = data.author\n\n # BUG in python (fixed in python 3.2): %z not supported in strptime\n statement = \"%s;%s\" % (self.insert_tweet_query, self.insert_user_query)\n\n try:\n cursor = self.connection.cursor()\n cursor.execute(statement, (\n data.id, user.id, data.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n data.in_reply_to_user_id_str, data.in_reply_to_status_id_str,\n data.retweeted, data.favorited, data.favorite_count,\n data.retweet_count, data.source, data.text, urls, hashtags,\n user_mentions, user.id, user.created_at, user.description,\n user.favourites_count, user.followers_count,\n user.friends_count, user.statuses_count,\n user.listed_count, user.time_zone,\n user.verified, user.geo_enabled,\n user.lang, user.location,\n user.screen_name))\n cursor.close()\n self.connection.commit()\n except Exception as inst:\n print type(inst) # the exception instance\n print inst.args # arguments stored in .args\n print inst # __str__ allows args to printed directly\n\n if not self.queue.empty():\n self.classifier = self.queue.get()\n return False # force a restart of the stream\n else:\n return True", "def get_tweet_object_from_tweet_js(seq, num_of_tweet_block):\r\n\r\n data = \"\"\r\n res = []\r\n curr = 0\r\n start_flag = False\r\n \r\n for line in seq:\r\n line = line.rstrip()\r\n \r\n if \"\\\"tweet\\\"\" in line:\r\n start_flag = True\r\n if line != \"}, {\" and start_flag:\r\n if 'full_text' in line:\r\n line = line.replace('full_text', 'text')\r\n data += line\r\n if line == \"}, {\":\r\n start_flag = False\r\n curr += 1\r\n # remove the extra \"tweet\" in front\r\n res.append(data.split(\"\\\"tweet\\\" : \")[1])\r\n data = \"\"\r\n if curr >= num_of_tweet_block:\r\n return res\r\n \r\n return res # in case we 
have parsed all lines but still fewer than `num_of_tweet_block`, return the result anyways\r", "def merge_tweets_v1():\n formatted_tweet_list = []\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n formatted_content = dict()\n content = json.loads(line)\n formatted_content['id_str'] = content['identifier']\n formatted_content['full_text'] = content['text']\n\n metadata = content['metadata']\n entities = dict()\n for feature_name in ['hashtags', 'symbols', 'user_mentions', 'urls']:\n attribute_name = 'entities.{}'.format(feature_name)\n if attribute_name in metadata:\n entities[feature_name] = json.loads(metadata[attribute_name])\n entities['media'] = content['media']\n formatted_content['entities'] = entities\n\n formatted_content['created_at'] = metadata['created_at']\n for feature_name in ['retweet_count', 'favorite_count']:\n formatted_content[feature_name] = int(metadata[feature_name])\n formatted_content['user'] = dict()\n formatted_content['user']['verified'] = metadata['user.verified'] == 'true'\n if 'coordinates' in metadata and metadata['coordinates'] != 'null':\n formatted_content['coordinates'] = metadata['coordinates']\n\n formatted_tweet_list.append(formatted_content)\n\n outfile = '../data/tweets-content-merged.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list:\n fout.write(json.dumps(tweet) + '\\n')", "def send_tweet(in_list):\n\n with open('credentials.json') as json_file:\n creds = json.load(json_file)\n\n twit_creds = creds['twitter']\n consumer_key = twit_creds['consumer_key']\n consumer_secret = twit_creds['consumer_secret']\n access_token = twit_creds['access_token']\n access_token_secret = twit_creds['access_token_secret']\n\n for dev in in_list:\n to_tweet = f\"New #Aberdeen AQ device found. ID = {dev}. 
See it on a map: http://uk.maps.luftdaten.info/#9/57.3406/-1.9226 \"\n # tweet the message\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n tweepyapi = tweepy.API(auth)\n tweepyapi.update_status(to_tweet)\n # print(\"Tweeted\")", "def get_tweet(self, id):\r\n return self.tweets[id]", "def loadTweets(filename):\n tweets = open(filename, 'r').read().splitlines()\n print \"Loading %d tweets from %s ...\" % (len(tweets), filename)\n tweetObjects = []\n for tweet in tweets:\n try:\n js = json.loads(tweet)\n if (not ('place' in js)) or js['place'] == None:\n continue\n elif (not ('full_name' in js['place'])):\n continue\n elif (not ('geo' in js)) or js['geo'] == None:\n continue\n elif (not ('coordinates' in js['geo'])):\n continue\n coords = js['geo']['coordinates']\n place = js['place']\n tweetObject = Tweet(js['text'], place['full_name'], coords[0], coords[1], place['country'], js['created_at'])\n tweetObjects.append(tweetObject)\n except ValueError:\n pass\n print \"Loaded %d tweets\" % len(tweetObjects)\n return tweetObjects", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... 
Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t", "def tweet_results(dl):\n\n TWT = twt.get_api()\n try:\n if(DEBUG):\n print(\"=====Mock Posting=====\")\n print(MSG.format(ISP, dl, ISP_DL))\n print(\"=====End=====\", flush=True)\n else:\n TWT.update_status(MSG.format(ISP, dl, ISP_DL))\n except:\n print(\"error posting to twitter\", flush=True)", "def on_status(self, status):\n try:\n \n time = status.created_at\n text = str(status.text)\n \n if text.startswith('RT'):\n text = text.split('RT')[1].replace(',','')\n print(text)\n print(time)\n \n line = str(text + ',' + str(time) + '\\n')\n output = open('tweets.txt','a')\n output.write(line)\n output.close() \n else:\n text = text.split('RT')[0].replace(',','')\n print(text)\n \n line = str(text + ',' + str(time) + '\\n')\n output = open('tweets.txt','a')\n output.write(line)\n output.close()\n\n # count\n self.counter += 1\n print(self.counter)\n \n if self.counter < self.limit:\n return True\n else:\n self.counter ==0\n twitterStream.disconnect()\n \n \n except BaseException as e:\n print('failed on_status,',str(e))", "def read_twitter_json(f):\n tweets = list()\n with open(f) as json_file:\n for line in json_file:\n tweets.append(json.loads(line))\n return tweets", "def reply_to_tweet():\n\n print('retrieving and replying to tweets...')\n all_mentions = api.mentions_timeline()\n\n # The content of the reply that the bot will send.\n rap_message = ' yo yo yo yo'\n\n for mention in reversed(all_mentions):\n\n # print(str(mention.id) + '-' + mention.text)\n\n if 'rap for me' in mention.text.lower():\n # checks if the bot received a request to deliver a rap\n print('received a request')\n print('dropping a new single...')\n # Checks if the latest mention came from the same person.\n if mention.id == mention.id[0]:\n # Posts a tweet saying the bot is 'too tired' and won't generate a new rap.\n api.update_status('@' + mention.user.screen_name + ' yo sorry I am too tired right now')\n else:\n # Posts a tweet with the rap to the user.\n api.update_status('@' + mention.user.screen_name + rap_message, mention.id)\n print('single dropped.')", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n 
alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... %s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def open_tweet_obj(tweets_obj):\n tweets = []\n for tweet_obj in tweets_obj:\n for tweet in tweet_obj:\n tweets.append(tweet)\n return tweets", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_tweet_list(user_handle):\n client = language.LanguageServiceClient()\n\n tweet_list = twitter.get_tweets(handle=user_handle)\n\n if tweet_list[0] == \"34\":\n return tweet_list\n\n for i in range(len(tweet_list)):\n\n content = tweet_list[i].get(\"text\")\n\n document = types.Document(\n content=content, type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n\n # Print the results\n # print_result(annotations)\n\n score = annotations.document_sentiment.score\n magnitude = annotations.document_sentiment.magnitude\n\n tweet_list[i][\"score\"] = score\n tweet_list[i][\"magnitude\"] = magnitude\n\n # print(tweet_list[i])\n\n return tweet_list", "def get_tweets(self):\n\t\ttweets = ''\n\t\tfor each in self.tweets_posted:\n\t\t\ttweets += each.timeline_format() + '\\n'\n\t\ttweets = tweets.strip('\\n')\n\t\treturn tweets", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n 
subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, 
params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets", "def post(self):\n url = self.request.get('url')\n try:\n response = urlfetch.fetch(url)\n if response.status_code == 200:\n items = simplejson.loads(response.content)\n key = Batch(pickled_items=pickle.dumps(items)).put()\n if key:\n taskqueue.Task(\n url='/tasks/etl',\n params={'batch_id': key.id()}\n ).add('etl')\n else:\n logging.info(\"Fetch failed, got response %d\" % response.status_code)\n except urlfetch_errors.DownloadError, e:\n logging.info(\"Twitter responded too slowly. 
%s\" % e.message)", "def jsonCreator(raw_data):\r\n tweets_data = []\r\n tweets_file = open(raw_data, \"r\")\r\n for line in tweets_file:\r\n try:\r\n tweet = json.loads(line)\r\n tweets_data.append(tweet)\r\n except:\r\n continue\r\n return tweets_data", "def extract_relevant(self):\n item_extraction = self.data\n my_dict = {'tweeted_time': item_extraction['created_at'],\n 'tweet_id': item_extraction['id'],\n # If the time comes when the below becomes more significant, it will be no trouble at all to make an\n # additional column for it, but delimiting it with a ` creates less clutter in the Database\n 'in_reply_to':\n \"NAME/\" + str(item_extraction['in_reply_to_screen_name']) + \"`\" +\n \"STATUSID/\" + str(item_extraction['in_reply_to_status_id_str']) + \"`\" +\n \"USERID/\" + str(item_extraction['in_reply_to_user_id_str']),\n 'lang': item_extraction['lang'],\n 'place': item_extraction['place'], 'source': item_extraction['source']}\n if item_extraction['place'] is not None:\n my_dict['place'] = item_extraction['place']['full_name']\n if 'retweeted_status' in item_extraction.keys():\n my_dict['original_author_id'] = item_extraction['retweeted_status']['user']['id']\n my_dict['original_author_handle'] = item_extraction['retweeted_status']['user']['screen_name']\n tester = item_extraction['retweeted_status']['text']\n cleaned = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", tester).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n # This final text will make it a lot easier to run NLP\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n else:\n my_dict['original_author_id'] = item_extraction['user']['id']\n my_dict['original_author_handle'] = item_extraction['user']['screen_name']\n cleaned = ' '.join(re.sub(\"(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", item_extraction['text']).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n return my_dict", "def rt(result):\n #Works only on real retweets (no replies with RT chain)\n return result.retweeted", "def tweet_to_salmon_vars(self, tweet):\n # there might be more than one URL in the tweet. find the one on our domain.\n # https://dev.twitter.com/docs/tweet-entities\n link = None\n for url_data in tweet.get('entities', {}).get('urls', []):\n # expanded_url isn't always provided\n url = url_data.get('expanded_url') or url_data.get('url')\n if url and urlparse.urlparse(url).netloc == self.key().name():\n link = url\n\n # parse the timestamp, formatted e.g. 
'Sun, 01 Jan 2012 11:44:57 +0000'\n created_at = tweet.get('created_at')\n if created_at:\n created_at = re.sub(' \\+[0-9]{4}$', '', created_at)\n updated = datetime.datetime.strptime(created_at,\n '%a, %d %b %Y %H:%M:%S')\n updated = updated.isoformat()\n else:\n updated = ''\n\n return {\n 'id': util.tag_uri(self.DOMAIN, str(tweet.get('id'))),\n 'author_name': tweet.get('from_user_name'),\n 'author_uri': 'acct:%s@twitter-webfinger.appspot.com' % tweet.get('from_user'),\n 'in_reply_to': link,\n 'content': tweet.get('text'),\n 'title': tweet.get('text'),\n 'updated': updated,\n }", "def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()", "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def savetweets(request):\n temp = json.loads(request.body)\n #print (temp)\n tweet_list = temp['tweets']\n movieID = temp['movie_id']\n print (movieID)\n for i in tweet_list:\n i['movie_id']=movieID\n serializer = TweetsSerializer(data=i)\n #print (serializer)\n #print (serializer.data)\n print (serializer.is_valid())\n if serializer.is_valid():\n serializer.save()\n return Response(status=status.HTTP_201_CREATED)\n #print (temp['hashtags'])", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', 
'.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)", "def post_to_tweets(data, url):\n\n print(\"here,\", url)\n\n albums = find_all_images(data['content'])\n text = strip_text(data['content'])\n\n \"\"\"Where applicable, the images are associated with the text. This means, that to make an appropriate thread the\n conversion from a post to tweets should take into account how words relate to images in a spacial way. For this\n reason, we convert to tweets in batches.\"\"\"\n\n cfg = get_keys() # grab keys\n api = get_api(cfg) # setup API\n in_reply_to = None\n twitter_url = 'https://twitter.com'\n\n # for idx, caption in enumerate(text):\n # if idx > 0:\n # url_img = None\n # caption = re.findall(r\"[\\w']+|[.!?;]\\ \", caption)\n # text[idx] = text_to_tweets(caption, url_img)\n\n try:\n if data['in_reply_to'] is not None: # if post is reply ...\n for reply in data['in_reply_to']:\n if reply[:len(twitter_url)] == twitter_url: # if the URL points to twitter ...\n in_reply_to = reply.split('/')[-1:] # ... get the status id\n except KeyError:\n pass\n\n url = 'https://' + DOMAIN_NAME + url\n\n tweets = text_to_tweets(text, url=url) # process string into tweet thread\n\n # try and parse a lat lng.\n try:\n lat, lng = data['geo'][4:].split(\",\")\n except KeyError:\n lat, lng = None, None\n\n # post the first tweet so that we have a status id to start the thread\n status = api.update_status(status=tweets[0].pop(0), in_reply_to_status_id=in_reply_to)\n first_id = status.id # the id which points to origin of thread\n\n for album_group in text:\n try:\n media = album_group.pop(0) # get the corresponding album\n for tweet in album_group:\n status = api.update_with_media(filename=media, status=tweet, in_reply_to_status_id=status.id, lat=lat, long=lng)\n media = None\n except IndexError: # if we're out of albums...\n pass\n return 'http://twitter.com/{name}/status/{id}'.format(name=status.user.screen_name, id=first_id, lat=lat, lng=lng)", "def read_tweets():\n data = []\t\n try:\n with open(os.path.dirname(__file__) + '/../tweet_input/tweets.txt') as f:\n for line in f:\n jfile = json.loads(''.join(line))\n data.append(jfile)\n return data\n except:\n print(\"File not found\")\n return []", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def pushTweets(tweets,user,cacheKey=False):\n \n tweetDump = filterTweets(tweets) # Extract mentions, URLs, replies hashtags etc...\n\n pushRenderedTweets2Neo.delay(user,tweetDump) \n pushRenderedTweets2Cass.delay(user,tweetDump)\n pushRenderedTweets2Solr.delay(tweetDump['tweets']+tweetDump['retweets'])\n\n if cacheKey: # These are the last Tweets, tell the scaper we're done.\n cache.set(cacheKey,'done')\n print '*** '+user+': DONE WITH TWEETS ***' \n \n #return True", "def json_write_tweets_data(content):\n count = 
0\n all_data = []\n file_name = create_newfile()\n for data_200 in content:\n tweets = json.loads(data_200[0:len(data_200)])\n for tweet in tweets:\n count += 1\n data = {}\n data['text'] = tweet['text']\n data['favorite_count'] = tweet['favorite_count']\n data['retweet_count'] = tweet['retweet_count']\n dt = datetime.strptime(tweet['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y')\n data['created_at'] = datetime.strftime(dt, '%Y-%m-%d %H:%M:%S.%f')\n data['id'] = tweet['id']\n data['source'] = tweet['source']\n all_data.append(data)\n file_name = create_newfile()\n with open(file_name, 'w') as f:\n json.dump(all_data, f, indent=1)\n f.close()", "def original_three_tweets():\n test_tweets = [\n \"is #bigdata finally the answer to end poverty? \\\n @lavanyarathnam http://ow.ly/o8gt3 #analytics\",\n \"interview: xia wang, astrazeneca on #bigdata and the promise of effective \\\n healthcare #kdn http://ow.ly/ot2uj\",\n \"big data is not just for big business. on how #bigdata is being deployed for \\\n small businesses: http://bddy.me/1bzukb3 @cxotodayalerts #smb\"\n ]\n return test_tweets", "def process_tweet(tweet):\n d = {}\n d['hastags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]\n d['text'] = tweet['text']\n d['user'] = tweet['user']['screen_name']\n d['user_loc'] = tweet['user']['location']\n return d", "def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return", "def load_tweets(file):\n with open(file) as f:\n data = json.load(f)\n return data", "def getTweetById(tweetId):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n tmpTweet = api.get_status(tweetId, tweet_mode=\"extended\")\n tmpTweet._json['text']=tmpTweet._json['full_text']\n del (tmpTweet._json['full_text'])\n return tmpTweet._json", "def on_tweet(self, tweet):\n pass", "def imprimi_tweets(lista):\n\n for texto in lista:\n print(texto)", "def load_tweets(brand):\n\n api = twitter.Api(\n consumer_key=os.environ['TWITTER_CONSUMER_KEY'],\n consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'],\n access_token_key=os.environ['TWITTER_ACCESS_TOKEN_KEY'],\n access_token_secret=os.environ['TWITTER_ACCESS_TOKEN_SECRET'])\n\n twitter_handle = find_twitter_handle(brand)\n\n if not twitter_handle:\n return []\n else:\n results = []\n count = 0\n\n # keep querying api for media posts until we get 20 media posts, up to a max of 10 queries\n while len(results) < 20 and count < 10:\n if count == 0:\n # the first time we query, there will not be a max id\n response = api.GetUserTimeline(screen_name=twitter_handle[1:], include_rts=False, count=200, exclude_replies=True)\n else:\n # the subsquent times we query, the max id should be set to the oldest post, which will be tweets_lst[-1]\n # the next query will get the prior 200 posts prior to that oldest post\n response = api.GetUserTimeline(screen_name=twitter_handle[1:], max_id=max_id, include_rts=False, 
count=200, exclude_replies=True)\n\n tweets_lst = []\n\n for tweet in response:\n tweets_lst.append(tweet._json)\n\n # for each twitter post, search for media in the post and add it to the results list\n for status in tweets_lst:\n if \"entities\" in status:\n if \"media\" in status[\"entities\"]:\n results.append(status[\"entities\"][\"media\"][0][\"media_url\"])\n\n # if there are twitter posts, but less than 20 results, then we should set the max id to the oldest post, and then next time\n # the loop runs it will query starting from the previously oldest post\n if len(tweets_lst) > 1:\n max_id = tweets_lst[-1][\"id\"]\n # if there is less than or only one tweet, then return what we already have in the results list\n else:\n return results\n # add one to the count to track we do not exceed the 10 query count\n count = count + 1\n return results", "def load_tweets(filename):\n\n try:\n with open(filename, 'r') as f:\n data = json.loads(f.read())\n except:\n print('ERROR in load_tweets.')\n\n return data", "def retrieve_all_tweets(api, id_scr):\n full_tweet_list = []\n new_tweets = api.user_timeline(user_id=id_scr, count=200)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n while len(new_tweets) > 0:\n print \"getting tweets before {}\".format(oldest)\n new_tweets = api.user_timeline(user_id=id_scr, count=200, max_id=oldest)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), tweet.entities] for tweet in\n full_tweet_list]\n\n with open('{}_tweets.csv'.format(id_scr), 'wb') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"created_at\", \"text\", \"entities\"])\n writer.writerows(out_tweets)", "def processIdiom(i, idiom):\n global db\n cursor = db.cursor()\n \n statuses = searchIdiom(i, idiom)\n #Should have at least 10 statuses to be useful\n if len(statuses) < 10:\n return\n # loop through each of my statuses, and print its content\n for status in statuses:\n #print status[\"text\"]\n try:\n id_str = status[\"id_str\"]\n text = status[\"text\"].encode('ascii','ignore')\n retweet_count = status[\"retweet_count\"]\n user = status[\"user\"]\n created_at = status[\"created_at\"]\n entities = status[\"entities\"]\n entities = json.dumps(entities)\n\n user_id_str = user[\"id_str\"]\n name = user[\"name\"].encode('ascii','ignore')\n screen_name = user[\"screen_name\"]\n description = user[\"description\"].encode('ascii','ignore')\n user_entities = json.dumps(user[\"entities\"])\n followers_count = user[\"followers_count\"]\n listed_count = user[\"listed_count\"]\n profile_image_url = user[\"profile_image_url\"]\n verified = str(user[\"verified\"])\n\n \n cursor.execute('INSERT IGNORE INTO idiomatic_tweets(idiom, id_str, text, retweet_count, user_id_str, created_at, entities, name, profile_image_url, screen_name, verified) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);' \\\n ,(idiom, id_str, text, retweet_count, user_id_str, created_at, entities, name, profile_image_url, screen_name, verified))\n\n cursor.execute('INSERT IGNORE INTO idiomatic_users(id_str, name, screen_name, description, entities, followers_count, listed_count, profile_image_url, verified) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);' \\\n ,(user_id_str, name, screen_name, description, user_entities, followers_count, listed_count, profile_image_url, verified))\n except Exception as e:\n print('Error : ', e)\n print sys.exc_traceback.tb_lineno \n\n if statuses:\n 
cursor.execute('INSERT IGNORE INTO idiomatic_idioms(idiom) VALUES (%s);', (idiom,))\n else:\n print \"statuses\" , statuses", "def get_tweets(keyword):\n url = 'http://search.twitter.com/search.json?q='\n\n page = urllib.urlopen('%s%s' % (url, keyword))\n blob = page.read()\n jsonblob = json.loads(blob)\n return jsonblob", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def score_tweets():\n\t\n\ts = -1\n\tstatus = 'Error'\n\treason = \"\"\n\ttid = -1\n\ttjson = request.json['tweetJSON']\n\tbatchResult = []\n\n\tfor tweet in tjson:\n\t\ttry:\t\t\n\t\t\ts = model.score(tweet)\n\t\t\tstatus = 'OK'\n\t\t\ttobj = json.loads(tweet)\n\t\t\ttid = tobj['id']\n\n\t\texcept:\n\t\t\treason = \"Error loading json.\"\n\n\t\tbatchResult.append({ \n\t\t\t\t\t 'status' : status,\n\t\t\t\t\t 'score' : s,\n\t\t\t\t\t 'tid' : tid,\n\t\t\t\t\t 'reason' : reason\n\t\t\t\t\t })\n\n\treturn jsonify({\n\t\t\t'batchResult' : batchResult\n\t\t})", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def tweet_text(tweet):\n return tweet['text']", "def get_tweets(api):\n return api.user_timeline()", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. 
Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets" ]
[ "0.6760458", "0.6652912", "0.65520895", "0.63962245", "0.6344638", "0.634384", "0.633406", "0.6325491", "0.632352", "0.62919587", "0.624689", "0.62440544", "0.6242781", "0.6195591", "0.61683637", "0.6155086", "0.6136341", "0.6133877", "0.6104071", "0.6096447", "0.60790527", "0.60706174", "0.6065542", "0.60578114", "0.60324836", "0.6031191", "0.60263395", "0.6021725", "0.60041237", "0.6001241", "0.59884495", "0.5978984", "0.5964298", "0.5955825", "0.59542036", "0.5952568", "0.594373", "0.5940506", "0.59382737", "0.59180254", "0.5913202", "0.5912777", "0.591039", "0.58940494", "0.58907855", "0.5886272", "0.5872529", "0.5843088", "0.58378565", "0.5834584", "0.5830855", "0.580585", "0.5780679", "0.577928", "0.5779021", "0.57760656", "0.57680106", "0.5763647", "0.57621497", "0.5759516", "0.57527214", "0.5750835", "0.5748897", "0.57441", "0.57410634", "0.57392484", "0.5738671", "0.57325906", "0.57193476", "0.5719123", "0.5718783", "0.57183015", "0.5716511", "0.57133096", "0.57119983", "0.5707715", "0.56993586", "0.56877726", "0.56864744", "0.5684821", "0.568225", "0.56814307", "0.56789005", "0.5676675", "0.56740385", "0.56671095", "0.5664451", "0.56606466", "0.56597203", "0.5649738", "0.5647935", "0.5644081", "0.564175", "0.56413734", "0.5638784", "0.5632169", "0.56301904", "0.5626136", "0.5619931", "0.5616237" ]
0.5840768
48
fetch latest tweets saved from json
def __refresh_local_tweets(self):
    f_tweets = open(f'{TWEETS}', 'r')
    f_tweeted = open(f'{TWEETED}', 'r')
    try:
        self.tweets = json.load(f_tweets)
        self.tweeted = json.load(f_tweeted)
    finally:
        f_tweets.close()
        f_tweeted.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def get_tweets():\n broken_json = read_tweets()\n #\n # Remove the last comma and wrap in a json list\n #\n parsed = json.loads('[%s]' % broken_json[:-1])\n return parsed", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. 
{len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def get_tweets(api):\n return api.user_timeline()", "def get_tweets(keyword):\n url = 'http://search.twitter.com/search.json?q='\n\n page = urllib.urlopen('%s%s' % (url, keyword))\n blob = page.read()\n jsonblob = json.loads(blob)\n return jsonblob", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. 
Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets", "def retrieve_all_tweets(api, id_scr):\n full_tweet_list = []\n new_tweets = api.user_timeline(user_id=id_scr, count=200)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n while len(new_tweets) > 0:\n print \"getting tweets before {}\".format(oldest)\n new_tweets = api.user_timeline(user_id=id_scr, count=200, max_id=oldest)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), tweet.entities] for tweet in\n full_tweet_list]\n\n with open('{}_tweets.csv'.format(id_scr), 'wb') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"created_at\", \"text\", \"entities\"])\n writer.writerows(out_tweets)", "def fetch_tweets(self, screen_name, count):\n return {}", "def twitter(n=1):\n tweet = get_tweet(TWITTER_NAME, n)\n tweet_info = {\n 'text': tweet.text,\n 'date': tweet.created_at.strftime('%A, %B %d'),\n 'time': tweet.created_at.strftime('%H:%M'),\n 'latest': (int(n) == 1), # True if n is one, else False.\n }\n return jsonify(tweet_info)", "def get_tweets(self):\r\n return self.tweets", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def get_tweets(filename):\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True) # use parameter lines=True to read the file as a json object per line\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print(\"Something went wrong parsing the file \" + filename)", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n 
# Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n output_tweet[k] = v\n else:\n if debug: print(' skipping this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def get_latest_dweet():\r\n resource = URL + '/get/latest/dweet/for/' + thing_name # (6)\r\n logger.debug('Getting last dweet from url %s', resource)\r\n\r\n r = requests.get(resource) # (7)\r\n\r\n if r.status_code == 200: # (8)\r\n dweet = r.json() # return a Python dict.\r\n logger.debug('Last dweet for thing was %s', dweet)\r\n\r\n dweet_content = None\r\n\r\n if dweet['this'] == 'succeeded': # (9)\r\n # We're just interested in the dweet content property.\r\n dweet_content = dweet['with'][0]['content'] # (10)\r\n\r\n return dweet_content\r\n\r\n else:\r\n logger.error('Getting last dweet failed with http status %s', r.status_code)\r\n return {}", "def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= 
min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets", "def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n \"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets", "def getLatestData(self):\n jsonText = self.session.get(self.jsonURL).text\n\n # Somehow, the output I am getting has some garbage at the beginning.\n # So, skipping all text before first instance of \"{\".\n jsonText = jsonText[jsonText.find(\"{\"):]\n latestData = json.loads(jsonText)\n return latestData", "def get_tweets_from_json_file(self, filename, **kwargs):\n # empty list to store parsed tweets\n tweets = []\n if not os.path.isfile(filename):\n print('Could not find file: ', filename)\n return -1\n\n # get all handles from research subject\n handles = []\n for record in research.get_values(**kwargs):\n handles.append(record['handle'])\n\n with open(filename, mode='r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n\n # parsing tweets one by one\n for account in data:\n print(account)\n if account['handle'] in handles:\n for record in account['tweets']:\n tweet = record[0].strip()\n timestamp = record[1]\n print(tweet)\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.strip()\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.strip())\n\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets", "def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n 
auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)", "def load_tweets(file):\n with open(file) as f:\n data = json.load(f)\n return data", "def load_tweets(filename):\n\n try:\n with open(filename, 'r') as f:\n data = json.loads(f.read())\n except:\n print('ERROR in load_tweets.')\n\n return data", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... 
%s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return", "def flatten_tweets(tweets_json):\r\n tweets_list = []\r\n \r\n # Iterate through each tweet\r\n for tweet in tweets_json:\r\n tweet_obj = json.loads(tweet)\r\n \r\n # Store the user screen name in 'user-screen_name'\r\n tweet_obj['user-screen_name'] = tweet_obj['user']['screen_name']\r\n \r\n # Check if this is a 140+ character tweet\r\n if 'extended_tweet' in tweet_obj:\r\n # Store the extended tweet text in 'extended_tweet-full_text'\r\n tweet_obj['extended_tweet-full_text'] = tweet_obj['extended_tweet']['full_text']\r\n \r\n if 'retweeted_status' in tweet_obj:\r\n # Store the retweet user screen name in 'retweeted_status-user-screen_name'\r\n tweet_obj['retweeted_status-user-screen_name'] = tweet_obj['retweeted_status']['user']['screen_name']\r\n\r\n # Store the retweet text in 'retweeted_status-text'\r\n tweet_obj['retweeted_status-text'] = tweet_obj['retweeted_status']['text']\r\n \r\n tweets_list.append(tweet_obj)\r\n return tweets_list", "def read_twitter_json(f):\n tweets = list()\n with open(f) as json_file:\n for line in json_file:\n tweets.append(json.loads(line))\n return tweets", "def twitter_get_timeline(self):\n if self.twitter_bearer_token is None:\n return None\n\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=100&screen_name=' + \\\n self.private_data['twitter']['screen_name']\n\n headers = {'Authorization': 'Bearer %s' % self.twitter_bearer_token,\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n\n resp = requests.get(url, headers=headers)\n tweets = []\n if resp.status_code == 200:\n content = json.loads(resp.content)\n for i in range(0, len(content)):\n tweets.append(content[i]['text'])\n else:\n print('ERROR: unable to retrieve timeline')\n print(resp.content)\n\n return tweets", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in 
this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def get_retweets():\r\n\r\n retweets = models.Retweet.query.all()\r\n output = []\r\n\r\n for retweet in retweets:\r\n original_tweet = models.Tweet.query.get(retweet.post_id)\r\n retweet_data = {\r\n 'content': original_tweet.text_content,\r\n 'retweet_user': retweet.username,\r\n 'tweet_id': original_tweet.id,\r\n 'tweet_user': original_tweet.username,\r\n 'timestamp': retweet.timestamp.isoformat()\r\n }\r\n\r\n output.append(retweet_data)\r\n\r\n return {\"retweets\": output}", "def getTweetById(tweetId):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n tmpTweet = api.get_status(tweetId, tweet_mode=\"extended\")\n tmpTweet._json['text']=tmpTweet._json['full_text']\n del (tmpTweet._json['full_text'])\n return tmpTweet._json", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def extract_tweets(path):\n dict_list = []\n\n for line in 
open(path):\n loaded = json.loads(line)\n dict_list.append(loaded)\n\n text = \"\"\n for item in dict_list:\n '''\n try:\n tweet = item[\"text\"]\n #filter(lambda x: x in set(string.printable), tweet)\n text += text\n except UnicodeEncodeError:\n pass\n '''\n tweet = str(item[\"text\"].encode('ascii', 'ignore'))\n #filter(lambda x: x in set(string.printable), tweet)\n text += tweet\n\n return text", "def get_tweets(n=1):\n tweets = list(collection.find())[-n:]\n return tweets", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def get_tweets(self):\n\t\ttweets = ''\n\t\tfor each in self.tweets_posted:\n\t\t\ttweets += each.timeline_format() + '\\n'\n\t\ttweets = tweets.strip('\\n')\n\t\treturn tweets", "def process_query(api, query):\n last_tweet_id = None if 'LastTweetId' not in query else int(query['LastTweetId']['N'])\n results = api.GetSearch(result_type=\"recent\", term=query['Term']['S'],\n count=25, lang=\"en\", since_id=last_tweet_id)\n new_tweets = []\n if results:\n latest_tweet_id = results[0].id\n for tweet in results:\n if last_tweet_id is not None and tweet.id <= last_tweet_id:\n break\n new_tweets.append(tweet)\n store_tweets(query, new_tweets)\n update_last_tweet(query, latest_tweet_id)\n return len(new_tweets)", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def loadTweets(filename):\n tweets = open(filename, 'r').read().splitlines()\n print \"Loading %d tweets from %s ...\" % (len(tweets), filename)\n tweetObjects = []\n for tweet in tweets:\n try:\n js = json.loads(tweet)\n if (not ('place' in js)) or js['place'] == None:\n continue\n elif (not ('full_name' in js['place'])):\n continue\n elif (not ('geo' in js)) or js['geo'] == None:\n continue\n elif (not ('coordinates' in js['geo'])):\n continue\n coords = js['geo']['coordinates']\n place = js['place']\n tweetObject = Tweet(js['text'], place['full_name'], coords[0], coords[1], place['country'], js['created_at'])\n tweetObjects.append(tweetObject)\n except ValueError:\n pass\n print \"Loaded %d tweets\" % len(tweetObjects)\n return tweetObjects", "def load_tweets(fname):\n tweets = []\n for line in open(fname):\n tweets.append(json.loads(line))\n return tweets", "def load_tweets(brand):\n\n api = twitter.Api(\n consumer_key=os.environ['TWITTER_CONSUMER_KEY'],\n consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'],\n access_token_key=os.environ['TWITTER_ACCESS_TOKEN_KEY'],\n access_token_secret=os.environ['TWITTER_ACCESS_TOKEN_SECRET'])\n\n twitter_handle = 
find_twitter_handle(brand)\n\n if not twitter_handle:\n return []\n else:\n results = []\n count = 0\n\n # keep querying api for media posts until we get 20 media posts, up to a max of 10 queries\n while len(results) < 20 and count < 10:\n if count == 0:\n # the first time we query, there will not be a max id\n response = api.GetUserTimeline(screen_name=twitter_handle[1:], include_rts=False, count=200, exclude_replies=True)\n else:\n # the subsquent times we query, the max id should be set to the oldest post, which will be tweets_lst[-1]\n # the next query will get the prior 200 posts prior to that oldest post\n response = api.GetUserTimeline(screen_name=twitter_handle[1:], max_id=max_id, include_rts=False, count=200, exclude_replies=True)\n\n tweets_lst = []\n\n for tweet in response:\n tweets_lst.append(tweet._json)\n\n # for each twitter post, search for media in the post and add it to the results list\n for status in tweets_lst:\n if \"entities\" in status:\n if \"media\" in status[\"entities\"]:\n results.append(status[\"entities\"][\"media\"][0][\"media_url\"])\n\n # if there are twitter posts, but less than 20 results, then we should set the max id to the oldest post, and then next time\n # the loop runs it will query starting from the previously oldest post\n if len(tweets_lst) > 1:\n max_id = tweets_lst[-1][\"id\"]\n # if there is less than or only one tweet, then return what we already have in the results list\n else:\n return results\n # add one to the count to track we do not exceed the 10 query count\n count = count + 1\n return results", "def read_hourly_json(filename):\n tweets = []\n for line in open(filename, 'r'):\n try:\n tweets.append(json.loads(line))\n except ValueError:\n continue\n return tweets", "def get_last_text_post(self):\n with self.__connection.cursor() as cursor:\n sql = \"\"\"SELECT * FROM `ow_newsfeed_action`\n WHERE `id`= (SELECT MAX(`id`) FROM `ow_newsfeed_action` WHERE `entityType`=\"user-status\")\n AND `entityType`=\"user-status\"\n \"\"\"\n cursor.execute(sql)\n response = cursor.fetchone()\n data = json.loads(response[\"data\"])[\"status\"]\n return data", "def get_tweet(self, id):\r\n return self.tweets[id]", "def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n 
parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)", "def read_tweets():\n data = []\t\n try:\n with open(os.path.dirname(__file__) + '/../tweet_input/tweets.txt') as f:\n for line in f:\n jfile = json.loads(''.join(line))\n data.append(jfile)\n return data\n except:\n print(\"File not found\")\n return []", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def recoverTweets(authors=[], words=[], removeRetweets=False, sortBy='newest',**kwargs):\n authors = mapToValid(authors)\n words = mapToValid(words)\n\n def getTopNTweets(retrievedTweets, numberOfTweets):\n \"\"\"Sort the retrievedTweets by sortBy specified and returns the top-N Tweets\"\"\"\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]\n\n def getTweetsByUser(username, maxTweets=1000):\n \"\"\"Returns a list of (json) objects representing the tweets for a specified Twitter username.\n If any words is queried, it will filter out every tweet that doesn't contain any of those words.\"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n 
myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)\n\n def searchTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for a specified query.\n It doesn't work if any authors is specified.\n Then, startingDate and endingDate cannot be older than one week ago because of Twitter restrictions for standardAPI\n :reference: https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets\n \"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)\n\n\n def getTwitterscraperTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for the specified inputs.\n It's very useful to avoid restrictions such as number of requests or dates not older than 7 days ago for twitterAPI (and tweepy).\n It will call the recoverTweets.sh script to properly query the API by twitterscraper.\n :reference: https://github.com/taspinar/twitterscraper\n \"\"\"\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. 
Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets\n\n\n if \"maxTweets\" in kwargs:\n maxTweets=kwargs['maxTweets']\n else:\n maxTweets=1000\n\n if len(authors)==0 and len(words)==0:\n return(\"qua\") ###call sample function with maxTweets and (if any) dates\n if 'startingDate' in kwargs or 'endingDate' in kwargs:\n return getTwitterscraperTweets()\n\n if len(authors)!=0:\n tweets, splits, i = [], splitIntegerIntoIntegers(maxTweets,len(authors)), 0\n for author in authors:\n tweets.extend(getTweetsByUser(username=author, maxTweets=splits[i]))\n i+=1\n return tweets\n return getTweets()", "def tweets(request):\n if request.method == 'GET':\n max_items = request.GET.get('max_items') or _DEFAULT_MAX_ITEMS\n try:\n sentiments = models.Sentiment.objects.filter(is_tweet=True)[:max_items]\n serializer = models.SentimentSerializer(sentiments, many=True)\n return JSONResponse(serializer.data)\n except ObjectDoesNotExist:\n return JSONResponse([])\n return JSONResponse([], status=400)", "def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]", "def merge_tweets_v1():\n formatted_tweet_list = []\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n formatted_content = dict()\n content = json.loads(line)\n formatted_content['id_str'] = content['identifier']\n formatted_content['full_text'] = content['text']\n\n metadata = content['metadata']\n entities = dict()\n for feature_name in ['hashtags', 'symbols', 'user_mentions', 'urls']:\n attribute_name = 'entities.{}'.format(feature_name)\n if attribute_name in metadata:\n entities[feature_name] = json.loads(metadata[attribute_name])\n entities['media'] = content['media']\n formatted_content['entities'] = entities\n\n formatted_content['created_at'] = metadata['created_at']\n for feature_name in ['retweet_count', 'favorite_count']:\n formatted_content[feature_name] = int(metadata[feature_name])\n formatted_content['user'] = dict()\n formatted_content['user']['verified'] = metadata['user.verified'] == 'true'\n if 'coordinates' in metadata and metadata['coordinates'] != 'null':\n formatted_content['coordinates'] = metadata['coordinates']\n\n formatted_tweet_list.append(formatted_content)\n\n outfile = '../data/tweets-content-merged.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list:\n fout.write(json.dumps(tweet) + '\\n')", "def read_tweets(data_path):\n\n json_list = []\n with open(data_path, 'r') as json_file_:\n for line in json_file_:\n json_file = json.dumps(ast.literal_eval(line))\n json_list += json_file,\n \n header = ['tweet_id', 'tweet', 'date', 'lang_twitter', 'retweeted', 'user_id']\n required_cols = itemgetter(*header)\n\n #with open(data_path) as f_input, open('out/'+data_path[:-4]+'.csv', 'w', newline='') as f_output:\n output = data_path.split(\"/\")[-1]\n output = 'out/{}.csv'.format(output[:-4])\n with open(output, 'w', newline='') as f_output:\n csv_output = csv.writer(f_output)\n csv_output.writerow(header)\n for row in 
json_list:\n if row.strip():\n tweet = json.loads(row)\n tweet['tweet_id'] = tweet['id_str']\n tweet['tweet'] = tweet['extended_tweet']['full_text'] if (\"extended_tweet\" in tweet or \"full_text\" in tweet) and bool(tweet[\"truncated\"]) else tweet['text']\n tweet['date'] = tweet['created_at']\n tweet['lang_twitter'] = tweet['lang']\n tweet['user_id'] = tweet['user']['id_str']\n csv_output.writerow(required_cols(tweet))\n \n return True", "def latest(self):\n\n for i in json_parsed:\n number = i['number']\n available_bike_stands=i['available_bike_stands']\n last_update=datetime.datetime.fromtimestamp(i['last_update']/1000, pytz.timezone('Europe/Dublin'))\n available_bikes=i['available_bikes']\n insert_latest(number, available_bike_stands, last_update, available_bikes)", "def merge_tweets_v2():\n filename_list = []\n for filename in os.listdir('.'):\n if filename.startswith(\"trecis\") and filename.endswith(\".json\") and not filename.startswith(\"trecis2019-B\"):\n filename_list.append(filename)\n filename_list = sorted(filename_list)\n\n formatted_tweet_list = []\n formatted_tweet_list_2019 = []\n count_inconsistent = 0\n for filename in filename_list:\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n content = json.loads(line)\n formatted_content = json.loads(content['allProperties']['srcjson'])\n formatted_content['full_text'] = formatted_content['text']\n\n if 'entities' not in formatted_content:\n count_inconsistent += 1\n entities = dict()\n entities[\"symbols\"] = formatted_content['symbolEntities']\n entities[\"urls\"] = formatted_content['urlEntities']\n entities[\"hashtags\"] = formatted_content['hashtagEntities']\n entities[\"user_mentions\"] = formatted_content['userMentionEntities']\n entities[\"media\"] = formatted_content['mediaEntities']\n # To make the \"start\" and \"end\" API consistent with others\n for entity_name in [\"hashtags\", \"user_mentions\", \"urls\"]:\n for iEntity, entity in enumerate(entities[entity_name]):\n entity['indices'] = [entity['start'], entity['end']]\n entities[entity_name][iEntity] = entity\n formatted_content['entities'] = entities\n # Some other API convert\n formatted_content['retweet_count'] = formatted_content['retweetCount']\n formatted_content['favorite_count'] = formatted_content['favoriteCount']\n formatted_content['user']['favourites_count'] = formatted_content['user']['favouritesCount']\n formatted_content['user']['followers_count'] = formatted_content['user']['followersCount']\n formatted_content['user']['statuses_count'] = formatted_content['user']['statusesCount']\n formatted_content['user']['geo_enabled'] = formatted_content['user']['isGeoEnabled']\n formatted_content['user']['verified'] = formatted_content['user']['isVerified']\n formatted_content['user']['listed_count'] = formatted_content['user']['listedCount']\n formatted_content['user']['friends_count'] = formatted_content['user']['friendsCount']\n\n if filename.startswith(\"trecis2019\"):\n formatted_tweet_list_2019.append(formatted_content)\n else:\n formatted_tweet_list.append(formatted_content)\n\n if count_inconsistent > 0:\n print(\"There are {} tweets have inconsistent API about the entities, \"\n \"and they are automatically converted\".format(count_inconsistent))\n print(\"There are {0} tweets for 2018 and {1} tweets for 2019\".format(\n len(formatted_tweet_list), len(formatted_tweet_list_2019)))\n\n outfile = '../data/all-tweets.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list:\n 
fout.write(json.dumps(tweet) + '\\n')\n\n outfile = '../data/all-tweets-2019.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_2019:\n fout.write(json.dumps(tweet) + '\\n')", "def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break", "def get_top_trends_from_twitter_api(country='Japan', exclude_hashtags=True):\n # this stupid WOEID requires yweather to get (a library), because YAHOO itself has stopped supporting it\n # WOEID\n woeid_client = yweather.Client()\n woeid = woeid_client.fetch_woeid(location=country)\n\n check_rate_limit()\n\n if exclude_hashtags :\n trends = api.GetTrendsWoeid(woeid, exclude='hashtags')\n else:\n trends = api.GetTrendsWoeid(woeid, exclude=None)\n\n output = []\n images_output = []\n for trend in trends:\n trend = trend.AsDict()\n\n # get volumes\n try:\n tw_volume = int(trend['tweet_volume']),\n except:\n tw_volume = [0]\n\n # match time with timezone\n timestamp_str = trend['timestamp'] # this is utc\n timestamp_dt = str_2_datetime(timestamp_str, input_format=time_format_twitter_trends).replace(tzinfo=pytz.utc)\n\n # timestamp_local = timestamp_dt.astimezone(tz=pytz.utc)\n timestamp_utc_str = datetime_2_str(timestamp_dt, output_format=time_format_full_with_timezone)\n\n output.append({\n \"label\": trend['name'],\n \"volume\": tw_volume,\n \"time\": timestamp_utc_str,\n \"query\": trend['query'],\n \"url\": trend['url'],\n })\n\n images_output.append({\n \"label\": trend['name'],\n \"time\": timestamp_utc_str,\n \"tweets\": analyze_trending_keyword(trend['name'], count=50)\n })\n\n output_json = json.dumps(output, ensure_ascii=False)\n images_output_json = json.dumps(images_output, ensure_ascii=False)\n return output_json, images_output_json", "def format_tweets(keyword, jsonblob):\n results = jsonblob['results']\n\n output = []\n addline = output.append\n\n addline('<div class=\"tweet_container\">')\n addline('<h3>Recent #%s Tweets</h3>' % (keyword))\n\n for result in results:\n addline('<div class=\"tweet\">')\n addline(' <div class=\"tweetphoto\">')\n addline(' <a href=\"http://twitter.com/%s\">' % (result['from_user']))\n addline(' <img src=\"%s\" alt=\"%s\" 
title=\"%s\">' % (result['profile_image_url'], result['from_user'], result['from_user']))\n addline(' </a>')\n addline(' </div>')\n addline(' <div class=\"tweetstatus\">')\n addline(' %s <em><a href=\"http://twitter.com/%s/status/%s\">%s</a></em>' % (result['text'], result['from_user'], result['id'], result['created_at']))\n addline(' </div>')\n addline('</div>')\n\n return '\\n'.join(output)", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def tweet(self):\n try: \n return self._parsed_tweet\n except:\n if self.item_json:\n self._parsed_tweet = json.loads(self.item_json)\n else:\n self._parsed_tweet = {}\n return self._parsed_tweet", "def test_get_latest_dweet_for(self):\n dweepy.dweet_for(self.my_thing_id, test_data)\n\n dweets = dweepy.get_latest_dweet_for(self.my_thing_id)\n check_valid_get_response(self, dweets)", "def ajax_get_random_tweets(n):\r\n return dumps(get_random_tweets(int(n)))", "def get_posts(self, userid, username):\n dict_json = {}\n x = 0\n outfile_name = \"tweetsFrom\" + username + \".json\"\n posts = api.GetUserTimeline(user_id=userid, count=200)\n text_list = [p.text for p in posts]\n for text in text_list:\n dict_json[x] = text\n x += 1\n with open(outfile_name, \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get 
current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def load_tweets(self, max_items=10000, user=None):\n for name, info in self.users.items():\n try:\n os.mkdir(self.root + info['party'].lower().replace(' ', '_'))\n except FileExistsError:\n pass\n \n filepath = self.root + info['party'].lower().replace(' ', '_')\n filepath = filepath + '/' + name.lower().replace(' ', '')\n try:\n print(f'Reading tweets from {name}')\n user = info['screen_name']\n curs = tweepy.Cursor(self.api.user_timeline,\n screen_name=user,\n count=200,\n tweet_mode=\"extended\"\n ).items(max_items)\n\n with open(filepath + '.jsonl', 'w') as f:\n for status in curs:\n tweet = status._json\n json_dump_line(tweet, f)\n \n except tweepy.TweepError as exc:\n print(exc)\n os.remove(filepath + '.jsonl')", "def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n 
myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def json_write_tweets_data(content):\n count = 0\n all_data = []\n file_name = create_newfile()\n for data_200 in content:\n tweets = json.loads(data_200[0:len(data_200)])\n for tweet in tweets:\n count += 1\n data = {}\n data['text'] = tweet['text']\n data['favorite_count'] = tweet['favorite_count']\n data['retweet_count'] = tweet['retweet_count']\n dt = datetime.strptime(tweet['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y')\n data['created_at'] = datetime.strftime(dt, '%Y-%m-%d %H:%M:%S.%f')\n data['id'] = tweet['id']\n data['source'] = tweet['source']\n all_data.append(data)\n file_name = create_newfile()\n with open(file_name, 'w') as f:\n json.dump(all_data, f, indent=1)\n f.close()", "def extract_tweet_info_from_local_file(tweet_js_path, max_extract=None, get_cleaned_df=False, item_to_extract=['id_str', 'created_at'], output_path=None, begin=None, end=None):\r\n\r\n if max_extract is None: # extract all tweets by default\r\n with open(tweet_js_path, encoding='utf-8') as f:\r\n all_data = f.read()\r\n max_extract = all_data.count('\\\"tweet\\\"')\r\n\r\n final_file_name = 'parsed_tweets_df.csv' if get_cleaned_df else 'parsed_tweets.json'\r\n if output_path is None:\r\n output_path = final_file_name\r\n elif '.csv' not in output_path:\r\n output_path = os.path.join(output_path, final_file_name)\r\n\r\n if os.path.isfile(final_file_name):\r\n print(\"Found {}, assumed already parsed. 
Exiting\".format(final_file_name))\r\n return\r\n else:\r\n # do the actual extraction\r\n extracted_info = []\r\n with open(tweet_js_path, encoding='utf-8') as f:\r\n res = get_tweet_object_from_tweet_js(f, max_extract)\r\n\r\n print(\"Extracted {} tweet objects.\".format(len(res)))\r\n\r\n begin_mark = int(begin) if begin is not None else 0\r\n end_mark = int(end) if end is not None else len(res)\r\n\r\n for obj in res[begin_mark:end_mark]:\r\n tmp = []\r\n json_obj = json.loads(obj)\r\n if get_cleaned_df:\r\n for item in item_to_extract: # assume that item is a valid attribute of a status object\r\n tmp.append(json_obj[item])\r\n extracted_info.append(tmp)\r\n else: # want the actual Tweet object\r\n extracted_info.append(json_obj)\r\n\r\n if get_cleaned_df:\r\n formatted_df = pd.DataFrame(extracted_info, columns=item_to_extract)\r\n formatted_df.to_csv(output_path, index=False)\r\n else:\r\n with open(output_path, 'w', encoding='utf8') as file:\r\n file.write(json.dumps(extracted_info, sort_keys=True, indent=4, ensure_ascii=False))", "def merge_tweets_v3():\n filename_list = []\n for filename in os.listdir('.'):\n if filename.startswith(\"trecis\") and filename.endswith(\".json\"):\n filename_list.append(filename)\n filename_list = sorted(filename_list)\n\n formatted_tweet_list_train = []\n formatted_tweet_list_test = []\n count_inconsistent = 0\n for filename in filename_list:\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n content = json.loads(line)\n formatted_content = json.loads(content['allProperties']['srcjson'])\n formatted_content['full_text'] = formatted_content['text']\n\n if 'entities' not in formatted_content:\n count_inconsistent += 1\n entities = dict()\n entities[\"symbols\"] = formatted_content['symbolEntities']\n entities[\"urls\"] = formatted_content['urlEntities']\n entities[\"hashtags\"] = formatted_content['hashtagEntities']\n entities[\"user_mentions\"] = formatted_content['userMentionEntities']\n entities[\"media\"] = formatted_content['mediaEntities']\n # To make the \"start\" and \"end\" API consistent with others\n for entity_name in [\"hashtags\", \"user_mentions\", \"urls\"]:\n for iEntity, entity in enumerate(entities[entity_name]):\n entity['indices'] = [entity['start'], entity['end']]\n entities[entity_name][iEntity] = entity\n formatted_content['entities'] = entities\n # Some other API convert\n formatted_content['retweet_count'] = formatted_content['retweetCount']\n formatted_content['favorite_count'] = formatted_content['favoriteCount']\n formatted_content['user']['favourites_count'] = formatted_content['user']['favouritesCount']\n formatted_content['user']['followers_count'] = formatted_content['user']['followersCount']\n formatted_content['user']['statuses_count'] = formatted_content['user']['statusesCount']\n formatted_content['user']['geo_enabled'] = formatted_content['user']['isGeoEnabled']\n formatted_content['user']['verified'] = formatted_content['user']['isVerified']\n formatted_content['user']['listed_count'] = formatted_content['user']['listedCount']\n formatted_content['user']['friends_count'] = formatted_content['user']['friendsCount']\n\n if filename.startswith(\"trecis2019-B\"):\n formatted_tweet_list_test.append(formatted_content)\n else:\n formatted_tweet_list_train.append(formatted_content)\n\n if count_inconsistent > 0:\n print(\"There are {} tweets have inconsistent API about the entities, \"\n \"and they are automatically converted.\".format(count_inconsistent))\n print(\"There are {0} tweets for training and 
{1} tweets for testing\".format(\n len(formatted_tweet_list_train), len(formatted_tweet_list_test)))\n\n outfile = '../data/all-tweets.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_train:\n fout.write(json.dumps(tweet) + '\\n')\n\n outfile = '../data/all-tweets-2019.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_test:\n fout.write(json.dumps(tweet) + '\\n')", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def new_tweets(request):\n\n twitter_api = twitter.TwitterAPI(\"air quality\")\n\n if request.method == 'GET':\n max_items = request.GET.get('max_items') or _DEFAULT_MAX_ITEMS\n\n try:\n latest_tweet = models.Sentiment.objects.filter(is_tweet=True).latest('created')\n tweet_id = latest_tweet.tweet_id\n tweets = twitter_api.retrieve_new(tweet_id, max_items)\n except ObjectDoesNotExist:\n tweets = twitter_api.retrieve(max_items)\n\n # Serialize\n deserializer = models.SentimentSerializer()\n\n tweet_objs = []\n for idx, tweet_data in enumerate(tweets):\n tweet = deserializer.create(tweet_data)\n tweet.is_tweet = True\n tweet.save()\n tweet_objs.append(tweet)\n\n serialized = models.SentimentSerializer(tweet_objs, many=True)\n\n return JSONResponse(serialized.data)\n\n return JSONResponse([], status=400)", "def get_tweets_to_answer_to(api, last_tweet_id=0):\n\n parameters = {\"screen_name\": TARGET_SCREEN_NAME,\n \"include_rts\": False,\n \"count\": 200}\n if last_tweet_id > 0:\n parameters[\"since_id\"] = last_tweet_id\n\n last_tweets = api.statuses.user_timeline(**parameters)\n tweets_to_answer_to = sorted([tweet for tweet in last_tweets\n if TARGET_TWEET_REGEX.match(tweet[\"text\"])],\n key=lambda t: t[\"id\"])\n\n if last_tweets and not tweets_to_answer_to:\n # update the last tweet id so that we wont consider the same tweets\n # again next time\n update_last_tweet_id(sorted(last_tweets, key=lambda t: t[\"id\"])[-1][\"id\"])\n\n return tweets_to_answer_to", "def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return posts", "def json_posts_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post_result in posts:\n post_dict = get_public_post_dict(post_result[0], post_result[2])\n out['posts'].append(post_dict)\n\n return jsonify(out)", "def get_tweets():\n\n return Tweet.query.all()", "def last(self, count=None):\r\n url = '{0}/{1}'.format(self.get_pull_url(), 'last')\r\n params = base.get_params(('count',), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n 
twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def queryTerm2Twitter(term): \n statusList = api.GetSearch(term, count=100, result_type='recent')\n timeStampOfStatus = [datetime.fromtimestamp(i.created_at_in_seconds) for i in statusList]\n timeStampOfStatus.sort() \n return timeStampOfStatus[0]", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def jsonCreator(raw_data):\r\n tweets_data = []\r\n tweets_file = open(raw_data, \"r\")\r\n for line in tweets_file:\r\n try:\r\n tweet = json.loads(line)\r\n tweets_data.append(tweet)\r\n except:\r\n continue\r\n return tweets_data", "def collect_tweets(ticker):\n\n # Authenticate Tweepy credentials\n auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_SECRET_CONSUMER_KEY)\n auth.set_access_token(settings.TWITTER_TOKEN_KEY, settings.TWITTER_SECRET_TOKEN_KEY)\n api = tweepy.API(auth)\n\n stock = Stock.objects.get(ticker=ticker)\n\n # Search for recent Tweets with the specific ticker\n collected_tweets = api.search(q=ticker, result_type='recent', count=100)\n\n # Iterate over the collected Tweets and save them\n for tweet in collected_tweets:\n try:\n Tweet.objects.create(\n text=tweet.text,\n created_at=tweet.created_at,\n user_id=tweet.user.id,\n user_screen_name=tweet.user.screen_name,\n verified=tweet.user.verified,\n followers_count=tweet.user.followers_count,\n friends_count=tweet.user.friends_count,\n favourites_count=tweet.user.favourites_count,\n retweet_count=tweet.retweet_count,\n stock=stock,\n )\n except IntegrityError:\n pass", "def get_data(max_users = 30):\n\n #cache here\n\n\n mongo_db = pymongo.Connection('grande.rutgers.edu', 27017)['citybeat_production']\n tweets_collection = mongo_db['tweets']\n\n\n test_tweets = []\n seed_users = []\n\n\n\n try:\n with open('./cache_tweets.pkl'):\n tweets, test_tweets = pickle.load(open('./cache_tweets.pkl'))\n except:\n print 'in'\n # not here. 
fetch\n tweets = []\n for n, tweet in enumerate(tweets_collection.find({\"created_time\": {\"$gte\":\"1380643200\", \"$lt\":\"1380902400\"}})):\n tweet['text'] = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", tweet['text'])\n tweet['text'] = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', tweet['text'], flags=re.MULTILINE)\n tweets.append(tweet)\n print n\n\n #print 'len of tweets ', len(tweets), 'len of test = ', len(test_tweets)\n test_tweets = tweets[-100:-1]\n #pickle.dump((tweets, test_tweets), open('./cache_tweets.pkl','w'))\n\n tweets = [tweet for tweet in tweets if len(tweet['text'].split(' ')) >= 10]\n\n\n\n\n\n\n return tweets, test_tweets", "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def get_tweets(api, query):\n \n results = []\n for tweet in tweepy.Cursor(api.search, q=query).items(1000):\n results.append(tweet)\n \n id_list = [tweet.id for tweet in results]\n #unpack into dataframe\n data = pd.DataFrame(id_list,columns=['id'])\n \n data[\"text\"]= [tweet.text.encode('utf-8') for tweet in results]\n data[\"datetime\"]=[tweet.created_at for tweet in results]\n data[\"Location\"]=[tweet.place for tweet in results]\n \n return data", "def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == 
'404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())", "def delete_tweets_from_js(json):\n tweets_id = get_js_ids(json)\n\n config = open_config()\n\n timestamps = get_delete_timestamps(config)\n\n api = get_api()\n\n tweets = map(lambda status: {\n \"id\": status[1].tweet[\"id\"], \"timestamp\": status[1].tweet[\"created_at\"]}, tweets_id.iterrows())\n\n delete_tweets_by_id_js(api, tweets, timestamps[2])\n\n logger.info('done from json')", "def read_tweet(tweet_line):\n\ttweet = json.loads(tweet_line)\n\t#get text \n\ttry:\n\t\ttweet_text = tweet['text']\n\texcept:\n\t\treturn \"\"\n\t# get only tweets in english\n\tif \ttweet['lang'] != 'en':\n\t\treturn \"\"\n\treturn tweet_text.encode('utf-8')" ]
[ "0.7257259", "0.7204354", "0.71714604", "0.69285256", "0.6901832", "0.68701154", "0.6827814", "0.6807941", "0.67518646", "0.6662134", "0.665305", "0.66091895", "0.65875465", "0.65313274", "0.64886", "0.6487753", "0.64813256", "0.64548606", "0.6451675", "0.6448758", "0.64238", "0.63893956", "0.6387412", "0.63857037", "0.6372143", "0.63484645", "0.63463384", "0.6342303", "0.63312346", "0.6327203", "0.6318416", "0.6302423", "0.6299192", "0.6296601", "0.6292296", "0.6286682", "0.6282192", "0.6265471", "0.6244183", "0.6243509", "0.6218796", "0.61973685", "0.6194141", "0.6191029", "0.6179624", "0.6161837", "0.61563563", "0.61249083", "0.6121609", "0.61193836", "0.6116823", "0.6097281", "0.60704726", "0.6068218", "0.6068053", "0.6067617", "0.6067418", "0.604868", "0.60453653", "0.60215664", "0.6019659", "0.60153395", "0.60048217", "0.59960294", "0.5994073", "0.59930944", "0.59885013", "0.5982958", "0.5974427", "0.59722596", "0.5962186", "0.5960174", "0.5954606", "0.59289944", "0.59252536", "0.59217924", "0.59205556", "0.5920024", "0.59109175", "0.58945936", "0.5893789", "0.58880496", "0.5853513", "0.58401823", "0.5839508", "0.5837832", "0.5835219", "0.58271545", "0.5822184", "0.5807437", "0.58056295", "0.5802658", "0.5793204", "0.5783074", "0.5781902", "0.57784295", "0.57700074", "0.5769872", "0.57390094", "0.5734199" ]
0.6033949
59
record the modified tweet/tweeted structures on disk
def __update_local_tweets(self):
    f_tweets = open(f'{TWEETS}', 'w')
    f_tweeted = open(f'{TWEETED}', 'w')
    try:
        f_tweets.write(json.dumps(self.tweets, sort_keys=True, indent=4))
        f_tweeted.write(json.dumps(self.tweeted, sort_keys=True, indent=4))
    finally:
        f_tweets.close()
        f_tweeted.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, tid, tmsg, tent, fp_rel=REL_PATH):\n # TODO\n # work out if saved previously and give better response\n \n\n # save to file system/rest api? where?\n # save to filesystem\n # TODO what day is this? localtime?\n fn = dt.fn_current_day(ext='json')\n\n # TODO fix: warning on hard coded file paths\n fp = os.path.join(fp_rel) \n #print(\"fn={}\\nfp={}\".format(fn, fp))\n\n # print(\"twitter.save() fp=<%s>\" % fp)\n if os.path.isdir(fp):\n fpn = os.path.join(fp, fn)\n with open(fpn, 'a') as f:\n # TODO: kill if fails\n #print(\"tid={}\\ntmsg={}\\ttent={}\".format(tid, tmsg, tent))\n line_py = self.build_data(tid, tmsg, tent)\n line_json = system.py2json(line_py)\n\n #print(\"line_py={}\".format(line_py))\n #print(\"line_json={}\".format(line_json))\n\n f.write(line_json)\n f.write('\\n') # stops braces butting up\n return True \n\n return False", "def save_tweet_data(data):\n text_buffer = json.dumps(data)\n text_buffer = text_buffer[1:-1]\n text_buffer = '%s,' % text_buffer\n\n with open('public/data/tweets.spool', 'wt') as file_handle:\n file_handle.write(text_buffer)\n\n print('Updated.')", "def record_trace(self):\n\n tfname = str(int(time.time())) + \".obd2_reader.trace\"\n self.tf_out = open(tfname, 'a')\n self.RecordTrace = 1\n print \"Recoding trace to:\", tfname", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()", "def exportToDB(self, tweets):\n for t in range(len(tweets)):\n for x in range(len(tweets[t])):\n doc_ref = self.fs_db.collection(u'twitter').document(str(tweets[t][1]))\n doc_ref.set({\n u'created_date': str(tweets[t][0]),\n u'id': str(tweets[t][1]),\n u'tweet': tweets[t][2],\n u'screen_name': tweets[t][3],\n u'name': tweets[t][4],\n u'likes': tweets[t][5],\n u'retweets': tweets[t][6],\n u'location': tweets[t][7]\n })", "def on_data(self, tweet):\n if (time.time() - self.start_time) < self.limit:\n self.saveFile.write(tweet)\n return True\n else:\n self.saveFile.close()\n return False", "def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, 
e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def record(self):\n # TODO: record the data", "def save(self):\n #test output\n pywikibot.output('PICKLING %s records at %s' % (len(self.historyDict),datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n with open(self.datfilename, 'wb') as f:\n pickle.dump(self.historyDict, f, protocol=config.pickle_protocol)", "def save_info(gran):\n if gran not in {\"states\", \"cities\"}:\n raise ValueError(\n \"'\" + gran + \"'\" + \" is invalid. Possible values are ('states' , 'cities')\")\n\n data_path = \"data/\" + gran\n if not os.path.exists(data_path):\n raise Exception(\"Missing dataset data for \" + gran +\n \"! Please run build_data() first.\")\n elif len(os.listdir(data_path)) == 0:\n raise Exception(\"Missing dataset data for \" + gran +\n \"! Please run build_data() first.\")\n\n ids_file = open(data_path + \"/tweet_ids.pickle\", \"rb\")\n ids = pickle.load(ids_file)\n\n dataset_file = open(data_path + \"/dataset.pickle\", \"rb\")\n dataset = pickle.load(dataset_file)\n\n file_out = open(data_path + \"/dataset_infos02.tsv\",\n 'a', encoding=\"utf-8\", newline='')\n writer = csv.DictWriter(file_out, fieldnames=fieldnames, delimiter=\"\\t\")\n writer.writeheader()\n\n files = [file for file in d._get_files(RAW_PATH)]\n for file in files:\n\n opened_file = open(file, 'r', encoding=\"utf-8\")\n start_time = time()\n tweets = [json.loads(line) for line in opened_file]\n print(\"finshed loading tweets \", time() - start_time)\n\n start_time = time()\n tweets_rows = [_tweet_data(tweet, dataset[subset(tweet)][ids[subset(tweet)].index(\n tweet['id'])]) for tweet in tweets if subset(tweet) in ids if tweet['id'] in ids[subset(tweet)]]\n print(\"finshed filtering tweets\", time() - start_time)\n\n start_time = time()\n writer.writerows(tweets_rows)\n print(\"finshed storing tweets\", time() - start_time)\n # for line in opened_file:\n # tweet = json.loads(line)\n # #subset = tweet['place']['full_name']\n # if subset in ids:\n # if tweet['id'] in ids[subset]:\n # id_index = ids[subset].index(tweet['id'])\n # clean_tweet = dataset[subset][id_index]\n # writer.writerow(_tweet_data(tweet, clean_tweet))\n # # if len(tweets_rows) % 1000000 == 0:\n # # writer.writerows(tweets_rows)\n # # print(len(tweets_rows))\n # #tweets_rows = []\n # # file_out.close()\n # #file_index += 1\n # #file_out = _set_file(file_index, gran)\n\n time_elapsed = time() - start_time\n print(\"file time done: \", time_elapsed, \" sec.\")\n\n print(\"Dataset TSV infos stored in \", os.path.abspath(\n data_path + \"/dataset_infos.tsv\"))", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n 
#print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def save_field(self, field_data, t):\n t0 = time.time()\n self.f << field_data\n t1 = time.time()\n log.debug(\"Saved field at t={} to file '{}' (snapshot #{}; saving took \"\n \"{:.3g} seconds).\".format(t, self.filename, self.counter, t1 - t0))\n self.counter += 1", "def update_last_tweet_id(last_tweet_id):\n\n if last_tweet_id:\n with open(LAST_TWEET_FILE, 'wb') as last_tweet_file:\n pickle.dump(last_tweet_id, last_tweet_file)", "def save(*messages):\n data = Parser.parse_texts(*messages[1:])\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n title = Parser.parse_text(messages[0])\n\n file = open(\"./logs/\"+threading.currentThread().getName()+today+\".log\",'a+')\n file.write(\"\\n==\"+title+hour+\"==\\n\")\n if type(data) is dict: #Dictionary with each value being a triplet. 
From get_all_items\n for key in data.keys():\n file.write(Parser.parse_text(key) + \" -> \"+ Parser.parse_text(str(data[key].x)) +\"\\n\")\n elif type(data) is list: #From get_item, market item, attribute listings\n for listing in data:\n file.write(str(listing.id)+\" - \"+str(listing.price/100)+\" euros\\n\")\n else: #plain text\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def update_stored(unchanged, new_found, ex_dic, dat_tim):\n\n for sensor in unchanged:\n for dev in ex_dic['devices']:\n if sensor == dev['id']:\n dev['last_seen'] = str(dat_tim)\n\n for newbie in new_found:\n new_dict = dict()\n new_dict['id'] = newbie\n new_dict['first_seen'] = str(dat_tim)\n new_dict['last_seen'] = str(dat_tim)\n ex_dic['devices'].append(new_dict)\n if len(new_found) > 0:\n send_tweet(new_found)\n pass\n with open('last_seen.json', 'w') as out_file:\n json.dump(ex_dic, out_file, indent=4)", "def save_tweet(self, twitter) -> None:\n if isinstance(twitter, dict):\n json_data = twitter\n else:\n json_data = json.loads(twitter)\n\n try:\n breakpoint()\n self.db.tweets.find_one_and_update(\n {'id_str': json_data['id_str']},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n upsert=True,\n )\n except Exception as e:\n log.error(e)", "def write_to_file(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.clean_unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')", "def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))", "def __save_tweet(self, twitter_result):\n timestamp = twitter_result['timestamp']\n\n # Remove +0000 from timestamp\n timestamp_split = timestamp.split(' ')\n timestamp = ''\n for piece in timestamp_split:\n if piece[0] is not '+':\n timestamp += piece + ' '\n\n # Remove trailing space\n timestamp = timestamp[:-1]\n\n # Cast to iso format\n timestamp = datetime.strptime(timestamp, \"%a %b %d %H:%M:%S %Y\").isoformat()\n\n crawl = self.mongo_controller.add_crawl_twitter(\n twitter_result['keyword_id'],\n twitter_result['tweet_id'],\n twitter_result['text'],\n twitter_result['likes'],\n twitter_result['retweets'],\n timestamp,\n return_object=True,\n cast=True,\n )\n\n app.send_task('process-crawl', kwargs={ 'crawl_dict': crawl.to_json() }, queue=queues['processor'])\n\n return crawl", "def save(self):\n return getattr(self, \"_tweets\", None)", "def tweet(self):\n self.__refresh_local_tweets()\n\n if not self.tweets:\n return\n\n tweet_obj = self.tweets[0]\n\n # upload picture\n media_id = self.__upload_media(tweet_obj[\"img\"])\n\n # tweet with text, and image\n if not media_id:\n return\n self.__post_status(tweet_obj[\"text\"], media_id)\n\n self.tweets.remove(tweet_obj)\n self.tweeted.append(tweet_obj)\n self.__update_local_tweets()", "def on_data(self, data):\n status = json.loads(data)\n # increase the counter\n self.counter += 1\n\n retweet, rt_user, tweet_text, created_time = organize_tweet(status) \n\n if status['user']['id_str'] in 
infos.twitterids:\n\n who = status['user']['id_str']\n\n try:\n replied_to = status['in_reply_to_screen_name']\n except:\n replied_to = 'NULL'\n \n else:\n \n who = status['user']['screen_name']\n \n try:\n replied_to = infos.twitterids[status['in_reply_to_user_id_str']]\n except:\n replied_to = 'NULL'\n \n tweet = {\n \n 'id': status['user']['id_str'], #status.user.id_str,\n 'who': who,\n 'replied_to': replied_to,\n 'retweeted': retweet, #status['retweeted'], #status.retweeted,\n 'retweeted_from': rt_user,\n 'text': tweet_text,\n 'timestamp' : created_time\n }\n\n #write to mongoDB here\n collection.insert_one(tweet)\n print(f'New tweet arrived: {tweet[\"text\"]}')\n\n\n # check if we have enough tweets collected\n if self.max_tweets == self.counter:\n # reset the counter\n self.counter=0\n # return False to stop the listener\n return False", "def store_action_log(self, filename):\n t = self.get_current_timeindex()\n camera_obs = self.get_camera_observation(t)\n self._action_log[\"final_object_pose\"] = {\n \"t\": t,\n \"pose\": camera_obs.object_pose,\n }\n\n with open(filename, \"wb\") as fh:\n pickle.dump(self._action_log, fh)", "def run():\n\n #open pickle if exists\n filename = input('Load pickle file: ')\n path = io.get_path(filename)\n print('loading pickle from {0}'.format(path))\n tweeters = io.load_pickle(path)\n if tweeters is not None:\n twt.update_tweets(tweeters)\n else:\n print('Downloading tweet dump:')\n tweeters = twt.get_peer_dict(config.nameslist)\n # twt.print_tweeter_names(tweeters)\n twt.load_tweets(tweeters, 0)\n twt.update_tweeters_stats(tweeters)\n twt.print_tweeter_stats(tweeters)\n\n # Monitors new tweets from nameslist\n twt.listener(config.target, tweeters, config.wait)\n\n filename = input('save pickle file: ')\n io.save_data_to_file(tweeters, filename)", "def save(self):\n #--Data file exists?\n filePath = self.path\n if os.path.exists(filePath):\n ins = open(filePath)\n outData = compat.uncpickle(ins)\n ins.close()\n #--Delete some data?\n for key in self.deleted:\n if key in outData:\n del outData[key]\n else:\n outData = {}\n #--Write touched data\n for key in self.changed:\n outData[key] = self.data[key]\n #--Pickle it\n tempPath = filePath+'.tmp'\n cPickle.dump(outData,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)", "def __init__(self):\n self.timeStamp = 0\n self.tweetTimeLine = {}\n self.followList = {}", "def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)", "def record(self, content):\n datafile = open(self.filename, 'at')\n datafile.write('%s\\n' % content)\n datafile.close()", "def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')", "def save():", "def _mark_as_saved(self, message):\n sensor_type = message.get('sensor_type', 'default')\n self.last_written_for_sensor_type[sensor_type] = 
time.time()\n self.log(\"Updated last_saved for sensor type %s -> %r\" % (sensor_type,\n self.last_written_for_sensor_type))", "def log_all(self):\n self.save_raw()\n self.log()", "def write_data():", "def write_tweet(tweet):\n try:\n tweet_data = [tweet.date, tweet.content.encode('utf-8'), tweet.id, tweet.likeCount,\n tweet.replyCount,\n tweet.retweetCount, tweet.quoteCount,\n tweet.user.username, tweet.user.id, tweet.user.followersCount,\n tweet.user.friendsCount,\n tweet.user.statusesCount, tweet.user.verified, tweet.user.url, tweet.url]\n if tweet.mentionedUsers is not None:\n tweet_data.append([tweet.mentionedUsers])\n else:\n tweet_data.append(None)\n if tweet.quotedTweet is not None:\n tweet_data.append(tweet.quotedTweet.id)\n tweet_data.append(tweet.quotedTweet.content.encode('utf-8'))\n tweet_data.append(tweet.quotedTweet.user.username)\n tweet_data.append(tweet.quotedTweet.user.id)\n if tweet.quotedTweet.mentionedUsers is not None:\n tweet_data.append([tweet.quotedTweet.mentionedUsers])\n else:\n tweet_data.append(None)\n else:\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n return tweet_data\n except UnicodeEncodeError:\n pass", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def record_score(self, score):\n file_name = self.config_folder + self.score_file\n score_file = open(file_name,'a+')\n epoch_time = int(time.time())\n entry = \"%s %s %s %s\\n\" % ( epoch_time, score['word'], score['attempts'], score['time'])\n score_file.write(entry)\n score_file.close()", "def save(cls):\n\n cls._set_mode_stopped()\n notes = Notes.get_text()\n timestamp = TimeDisplay.get_timestamp()\n TimeLog.add_to_log(timestamp, *TimeDisplay.get_time(), notes)\n LogView.refresh()\n Notes.clear()\n TimeDisplay.stop_time()\n TimeDisplay.reset_time(erase=True)\n for callback in cls.save_callback:\n callback()", "def data_to_file(tweets, tweetsTest, alltweets, user_path_train, context_path_train, user_path_dev, context_path_dev):\n # write in train data\n # open user file name\n user_file = open(user_path_train, \"w+\")\n # place user tweets - one per line - in a file\n for tweetid in tweets:\n # get text of tweet with tweetid from user\n for t in alltweets[0]:\n if t.id == tweetid:\n tweet = t.text\n break\n # add string to file\n user_file.write(tweet + \"\\n\")\n user_file.close()\n # open context file name\n context_file = open(context_path_train, \"w+\")\n # place context tweets - one per \"time\" - in a file\n for tid, c in tweets.iteritems():\n # concatenate all context tweets into one string\n tweet = \"\"\n for t in c:\n tweet = tweet + \" \" + t.text\n # write mega-tweet to file\n context_file.write(tweet + \"\\n\")\n context_file.close()\n\n # write in test data\n user_file_dev = open(user_path_dev, \"w+\")\n # place user dev tweets - one per line - in a file\n for tweetid in tweetsTest:\n # get text of tweet with tweetid from 
user\n for t in alltweets[0]:\n if t.id == tweetid:\n tweet = t.text\n break\n # add string to file\n user_file_dev.write(tweet + \"\\n\")\n user_file_dev.close()\n\n # open context dev file name\n context_file_dev = open(context_path_dev, \"w+\")\n # place context tweets - one per \"time\" - in a file\n for tid, c in tweetsTest.iteritems():\n # concatenate all context tweets into one string\n tweet = \"\"\n for t in c:\n tweet = tweet + \" \" + t.text\n # write mega-tweet to file\n context_file_dev.write(tweet + \"\\n\")\n context_file_dev.close()", "def writeData(metadata,ObjectList):\n\tpath = metadata['Path']\n\tfilename = metadata['Filename']\n\tfiletype = metadata['Format']\n\tprint('creating data to save to {}'.format(path+filename+'.pickle'))\n\ttry:\n\t\tdata = loadData(path,filename)[1]\n\texcept:\n \t\tdata = __initializeData()\n\tdata['Num Objects'] = len(ObjectList)\n\tif not data['Num Objects'] == metadata['Num Objects']:\n\t\tprint('warning: mistmatch between number of objects in metadata and ObjectList')\n\t\tprint('you may want to double check before any overwriting')\n\t\ttime.sleep(2)\n\n\tmessage = 'old data last saved on ' + data['Time_Written_POST']\n\t# save the post-processing data if indicate overwrite is ok\n\tif checkForOverwrite(data['Saved_POST'],message):\n\t\tfor i,object in enumerate(ObjectList):\n\t\t\tkey = \"object{}\".format(i)\n\t\t\tdata[key+'_ID'] = object.ID\n\t\t\tdata[key+'_time_POST'] = object.PostData.Time\n\t\t\tdata[key+'_x_POST'] = object.PostData.X\n\t\t\tdata[key+'_y_POST'] = object.PostData.Y\n\t\t\tdata[key+'_theta_POST'] = object.PostData.Theta\n\t\tdata['Saved_POST'] = True\n\t\tdata['Time_Written_POST'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\telse:\n\t\tprint('old data kept, new data not written')\n\t# ONLY save live data if it was never saved before\n\tif not data['Saved_LIVE']:\n\t\tfor i,object in enumerate(ObjectList):\n\t\t\tdata[key+'_ID'] = object.ID\n\t\t\tdata[key+'_time_LIVE'] = object.LiveData.Time\n\t\t\tdata[key+'_x_LIVE'] = object.LiveData.X\n\t\t\tdata[key+'_y_LIVE'] = object.LiveData.Y\n\t\t\tdata[key+'_theta_LIVE'] = object.LiveData.Theta\n\t\tdata['Saved_LIVE'] = True\n\t\tdata['Time_Written_LIVE'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\treturn data", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def json_write_tweets_data(content):\n count = 0\n all_data = []\n file_name = create_newfile()\n for data_200 in content:\n tweets = json.loads(data_200[0:len(data_200)])\n for tweet in tweets:\n count += 1\n data = {}\n data['text'] = tweet['text']\n data['favorite_count'] = tweet['favorite_count']\n data['retweet_count'] = tweet['retweet_count']\n dt = datetime.strptime(tweet['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y')\n data['created_at'] = datetime.strftime(dt, '%Y-%m-%d %H:%M:%S.%f')\n data['id'] = tweet['id']\n data['source'] = tweet['source']\n all_data.append(data)\n file_name = create_newfile()\n with open(file_name, 'w') as f:\n json.dump(all_data, f, indent=1)\n f.close()", "def postTweet(self, 
userId: int, tweetId: int) -> None:\n self.timeStamp -= 1\n self.tweetTimeLine[userId] = self.tweetTimeLine.get(userId, []) + [[self.timeStamp, tweetId]]", "def write_stats(self, directory):\n\n target_dir = os.path.join(directory, 'tweet_stats')\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n # general stats\n self.stats_summary.append(\"%-30s\\t%12d\\n\" % ('Number of tweets', len(self)))\n self.stats_summary.append('%-30s\\t%-12s\\t%-12s' % ('Index', 'Type count', 'Token count'))\n\n for k in self.stats:\n k_stats = self.stats[k]\n\n rank = 0\n token_count = 0\n lines = []\n\n # Sort by frequency of words, pairs, triples, urls etc.\n k_stats_sorted = sorted(k_stats.iteritems(), key=operator.itemgetter(1), reverse=True)\n\n for val, card in k_stats_sorted:\n rank += 1\n token_count += card\n lines.append(\"%4d %-60s %5d\" % (rank, val, card))\n\n self.write_file(target_dir, \"%s.txt\" % k, \"\\n\".join(lines))\n\n # update summary with index name and corresponding type and token counts\n self.stats_summary.append('%-30s\\t%12d\\t%12d' % (k, len(k_stats), token_count))\n\n # write summary info\n self.write_file(target_dir, 'general.txt', \"\\n\".join(self.stats_summary))", "def write_telluric_transmission_to_file(wls,T,outpath):\n import pickle\n print('------Saving teluric transmission to '+outpath)\n with open(outpath, 'wb') as f: pickle.dump((wls,T),f)", "def save(self, ts):\n with open(self, 'w') as f:\n Timestamp.wrap(ts).dump(f)", "def save_index(self, fn):\n utils.save_obj(self.tweetTerms, \"TweetTerm_%s\" % (self.counterOfTweetTermsFiles))\n self.computeTfIdf(self.counterOfTweets)\n self.deleteSingleEntities()\n inv_dict = {'inverted_idx': self.inverted_idx, 'posting': self.postingFiles}\n utils.save_obj(inv_dict, fn)", "def postTweet(self, userId, tweetId):\n self.time += 1\n self.tweets[userId] = self.tweets.get(userId, []) + [(-self.time, tweetId)]", "def persist_file(tweets, directory):\n log.debug(\"{} tweets to gzipped file\".format(len(tweets)))\n\n filename = join(directory, \"{}.gz\".format(date.today()))\n with gzip.open(filename, \"a+\") as f:\n write(tweets, f)", "def _save_logs(self):\n logger.info(\"Saving call logs\")\n data = [c.to_dict() for c in self.logs[:29]]\n tichy.Persistance('calls/logs').save(data)", "def save(self):\n self.updated_at = datetime.now()", "def save(self):\n from models import storage\n self.updated_at = datetime.datetime.now()\n storage.save()", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n output_tweet[k] = v\n else:\n if debug: print(' skipping 
this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def save(self):\n from models import storage\n self.updated_at = datetime.now()\n storage.save()", "def clean_tweets(data):\n count = 0\n f = open(os.path.dirname(__file__) + '/../tweet_output/ft1.txt','w')\n for item in data:\n if item.get('text'):\n string=item['text'].encode('ascii','ignore')+' (timestamp: '+item['created_at']+')\\n'\n f.write(string)\n if item['text'].encode('ascii','ignore')!=item['text']:\n count=count+1\n f.write('\\n')\n string=str(count)+' tweets contained unicode.'\n f.write(string)\n f.close()", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def save(self):\n context_obj = {}\n# list1 = [\"created_at\", \"updated_at\"]\n for key in FileStorage.__objects.keys():\n # if key not in list1:\n context_obj[key] = FileStorage.__objects[key].to_dict()\n# else:\n# context_obj[key] = value.isoformat()\n\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(context_obj, f)", "def on_data(self, data):\n\n t = json.loads(data) \n tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n }\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def publish_tweet(self, tweet):\n\n self.api.update_status(tweet)", "def write_record(self, input):\n \"\"\"\n input: dict (dict of key, elem to write to tf-record)\n \"\"\"\n features = collections.OrderedDict()\n for key, value in input.items():\n if self.schema[key][0] == \"fixed_len\":\n if self.schema[key][2] != []:\n shape = self.schema[key][2][0]\n if len(value) != shape:\n raise ValueError(\n \"`{}` has schema shape `{}`, but provided \\\n values `{}` has shape `{}`\".format(\n key, shape, value, len(value) # noqa\n )\n )\n\n if isinstance(value, six.text_type):\n value = six.ensure_binary(value, \"utf-8\")\n features[key] = self.schema_writer_fn[key](value)\n example_proto = tf.train.Example(features=tf.train.Features(feature=features))\n\n if self.tag == \"train\":\n if self.shuffle:\n index = random.choice(range(len(self.all_writer)))\n the_writer = self.all_writer[index]\n 
the_writer.write(example_proto.SerializeToString())\n self.examples_per_record[self.all_files[index]] += 1\n self.global_counter += 1\n else:\n\n # If global counter(no of individual records processed)\n # exceeds max_files_per_record then increment self.current_writer\n if self.global_counter > (self.current_writer + 1) * self.max_files_per_record:\n self.current_writer += 1\n self.current_file_name = \"{}_{}_{}.tfrecord\".format(self.file_name, self.tag, self.current_writer)\n self.examples_per_record[self.current_file_name] = 0\n self.current_file = tf.io.TFRecordWriter(self.current_file_name)\n self.temp_writers.append(self.current_file)\n\n the_writer = self.current_file\n the_writer.write(example_proto.SerializeToString())\n self.examples_per_record[self.current_file_name] += 1\n self.global_counter += 1\n\n if self.global_counter % self.verbose_counter == 0:\n logging.info(\"Wrote {} tfrecods\".format(self.global_counter))\n else:\n the_writer = self.all_writer[0]\n the_writer.write(example_proto.SerializeToString())\n self.global_counter += 1\n\n if self.global_counter % self.verbose_counter == 0:\n logging.info(\"Wrote {} tfrecods\".format(self.global_counter))", "def save(self, clause, parse):\n if type(clause) == int:\n clause = str(clause)\n\n orig_parse = self.orig[clause]\n corrections = list(dictdiffer.diff(orig_parse, parse))\n self.tracked[clause] = corrections\n\n with open(self.trackpath, 'w') as outfile:\n json.dump(self.tracked, outfile, indent=2, ensure_ascii=False)\n if not self.silent:\n spans = [\n f'clause {clause} saved (total {len(self.tracked)})'\n ]\n if corrections:\n spans.extend(str(c) for c in corrections)\n else:\n spans.append('no corrections')\n spans = '<br>'.join(spans)\n display(HTML(f'<div style=\"background:#9BD788;display:inline-block\">{spans}</div>'))", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def save_user_tweets(user, n, auth):\r\n t = twitter.Twitter(auth=auth)\r\n print(\"Fetching %i tweets from @%s\" % (n, user))\r\n tweets = t.statuses.user_timeline(screen_name=user, count=n)\r\n print(\" (actually fetched %i)\" % len(tweets))\r\n for tweet in tweets:\r\n save_tweet(tweet, outfile)", "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def Save(self):\n if not self._records:\n return\n records = list(self._records)\n stored_records = self._log.GetRecords()\n self._MergeRecords(records, stored_records)\n self._log.SetRecords(records[0:_MAX_NUM_RECORD])\n self._records.clear()", "def _backupLog(self, updateText):\n \taFile = \"archiving_log.txt\"\n \tos.rename( aFile, aFile+\"~\")\n \tdestination= open( aFile, \"w\" )\n \tsource= open( aFile+\"~\", \"r\" )\n \tfor line in source:\n \t\tdestination.write( line )\n \tdestination.write( str(updateText))\n \tsource.close()\n \tdestination.close()\n \tos.remove(aFile+\"~\")", "def write_data(self,word,wordstatus,badguess,missedletter,totalscore):\n global game_count\n global record_game\n global record_word\n global record_bad_guesses\n 
global record_missed_letters\n global record_total_score\n global record_word_status\n\n record_game.append(game_count)\n record_word.append(word)\n record_word_status.append(wordstatus)\n record_bad_guesses.append(badguess)\n record_missed_letters.append(missedletter)\n record_total_score.append(totalscore)\n\n game_count +=1", "def __init__(self):\n self.time = 0\n self.tweets = {}\n self.follows = {}", "def process_tweets(collection):\n\n\twith open('positive-tweets.txt') as p:\n\t\tprint \"{0}: Inserting positive tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in p.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 1})\n\tp.close()\n\n\twith open('negative-tweets.txt') as n:\n\t\tprint \"{0}: Inserting negative tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in n.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 0})\n\tn.close()", "def save(self, *args, **kwargs):\n self.modify_ts = datetime.now()\n super(ModelBase, self).save(*args, **kwargs)", "def save(self):\n self.updated_at = datetime.now()\n storage.save()", "def save(self):\n self.updated_at = datetime.now()\n storage.save()", "def save_meta_file(self, new_dict):\n try:\n self.logger.debug('Save new metadata file %s.', self.meta_file_path)\n if not self._meta_dict:\n self._meta_dict = {}\n\n self._meta_dict[\"schema\"] = \"http://telegram-messages-dump/schema/v/1\"\n\n if DumpMetadata.CHAT_NAME in new_dict:\n self._meta_dict[DumpMetadata.CHAT_NAME] = new_dict[DumpMetadata.CHAT_NAME]\n if DumpMetadata.LAST_MESSAGE_ID in new_dict:\n self._meta_dict[DumpMetadata.LAST_MESSAGE_ID] =\\\n new_dict[DumpMetadata.LAST_MESSAGE_ID]\n if DumpMetadata.EXPORTER in new_dict:\n self._meta_dict[DumpMetadata.EXPORTER] = new_dict[DumpMetadata.EXPORTER]\n\n self.logger.info('Writing a new metadata file.')\n with open(self.meta_file_path, 'w') as mf:\n json.dump(self._meta_dict, mf, indent=4, sort_keys=False)\n except OSError as ex:\n raise MetadataError(\n 'Failed to write the metadata file. 
{}'.format(ex.strerror))", "def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)", "def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")", "def store(self, filename):", "def save_records(self, pad_id):\n train_records_path = os.path.join(self.config.records_dir, \"train.tfrecords\")\n dev_records_path = os.path.join(self.config.records_dir, \"dev.tfrecords\")\n test_records_path = os.path.join(self.config.records_dir, \"test.tfrecords\")\n statistics_file = os.path.join(self.config.records_dir, \"statistics.json\")\n\n dict = {}\n dict['train_examples_num'] = len(self.train_set)\n dict['dev_examples_num'] = len(self.dev_set)\n dict['test_examples_num'] = len(self.test_set)\n\n with open(statistics_file, 'w', encoding='utf8')as p:\n json.dump(dict, p)\n\n self._save_records(train_records_path, self.train_set, pad_id)\n self._save_records(dev_records_path, self.dev_set, pad_id)\n self._save_records(test_records_path, self.test_set, pad_id)\n\n self.logger.info(\"all data have saved to records files.\")", "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "def dump_to_mongo(tracker, collection):\n\n t = time.time()\n t1 = time.time()\n t0 = time.time()\n\n counter = 0\n minuteCounter = 0\n for tweet in tracker: \n counter += 1\n t = time.time()\n if t - t1 > 60:\n print(\"%s tweets per min @ %s\"%((float(minuteCounter)/(t-t1)*60),datetime.datetime.fromtimestamp(t)))\n minuteCounter = 1\n t1 = t\n else:\n minuteCounter += 1\n\n # Use the numeric Tweet ID as primary key \n tweet['_id'] = tweet['id_str']\n\n # Insert each json as an entry in the mongodb collection\n entry = collection.insert(tweet)", "def _upload_to_twitter(self):\n if self._twitter:\n strip_file = self.create_strip(resolution_ratio=0.5)\n f = open(strip_file)\n self._twitter.request('statuses/update_with_media', {'status': self._twitter_text}, {'media[]': f.read()})\n f.close()\n os.remove(strip_file)", "def statusWrite(self, statusType):\n\n timeStatus = strftime('%H:%M:%S@%m/%d/%y')\n\n # dict used for ease of writing\n statusDict = {\n \"on\":\"\\nProgram start: \",\n \"start\":\"Detection start: \",\n \"stop\":\"Detection stop: \",\n \"in\":\"Bird in: \",\n \"here\":\"Bird still here: \",\n \"out\":\"Bird has left: \",\n \"done\":\"Program close: \"\n }\n\n with open(\"birdlog.txt\",'a') as statusFile:\n statusFile.write(statusDict[statusType] + timeStatus + \"\\n\")", "def saveUsage(self, filePath):\n message = time.strftime('%c') + ' : '\n for spot in self.getParkingSpots():\n message += str(spot.id) + ', ' + spot.status + '; '\n with open(filePath, 'a+') as outfile:\n outfile.write(message + '\\n')\n pass", "def save():\n pass", "def handle_new_tweet(tweet_data):\n\n assert tweet_data.get('id'), \"Tweet Must have ID\"\n assert tweet_data.get('search_string'), \"Tweet must have search 
string\"\n\n # check for this tweet already being tracked\n set_key = keys.tweet_search_set(tweet_data.get('search_string'))\n tweet_id = tweet_data.get('id')\n found = rc.zrank(set_key, tweet_id)\n print 'set key: %s' % set_key\n print 'found: %s' % found\n\n if not found:\n\n # set main hash\n key = keys.tweet_data(tweet_data.get('id'))\n rc.hmset(key, tweet_data)\n\n # add to our weighted set\n # keep the value as the id and the weight\n print 'adding: %s' % tweet_id\n rc.zadd(set_key, tweet_id, tweet_id)\n\n # fire event that tweet was added to db\n revent.fire('new_tweet_saved', tweet_data)\n\n return True\n\n return False", "def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)", "def save(self):\r\n self.updated_at = datetime.now()\r\n models.storage.save()", "def _update_cmd_time_info(self, end=False):\n time_stamp = time.time()\n time_passed = time_stamp - self._start_time\n if end:\n docs_proc_now = self._docs_processed % self._file_write_threshhold\n if docs_proc_now == 0:\n msg = ('Written {} documents to file in total. 
'\n 'Time passed: {:2f}')\n print(msg.format(self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(\n docs_proc_now, self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(self._file_write_threshhold,\n self._docs_processed, time_passed))", "def postTweet(self, userId, tweetId):\r\n self.timestamp += 1\r\n self.tweets_by_user[userId].append((self.timestamp, tweetId))", "def touch(self):\n self._timestamps['last_seen'] = rospy.get_rostime()", "def RT(ID, name):\r\n \"\"\"Takes a ID and username parameter\"\"\"\r\n \"\"\"Once tweeted log is updated in overall and to date tweetlog\"\"\"\r\n \r\n config = config_create()\r\n print(\"RT\")\r\n #Tid = int(float(ID))\r\n Tweetusername = config.get('Auth', 'botname')\r\n #TweetText = 'https://twitter.com/'+Tweetusername+'/status/'+ID\r\n #ReTweet = 'Hi I am ComicTweetBot!('+tim+') I Retweet Comics! Use #comicretweetbot '+TweetText\r\n x2 = config.get('Latest_Log', 'currenttweetlog')\r\n x3 = config.get('Latest_Log', 'overalllog')\r\n CONSUMER_KEY = config.get('Auth', 'CONSUMER_KEY') \r\n CONSUMER_SECRET = config.get('Auth', 'CONSUMER_SECRET')\r\n ACCESS_KEY = config.get('Auth', 'ACCESS_KEY')\r\n ACCESS_SECRET = config.get('Auth', 'ACCESS_SECRET')\r\n api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)\r\n tMax = int(float(config.get('Tweet_Delay', 'Max')))\r\n tMin = int(float(config.get('Tweet_Delay', 'Min')))\r\n tStep = int(float(config.get('Tweet_Delay', 'Step')))\r\n Log = open(x2, 'w')\r\n enterlog = ID+' '+name+ '\\n'\r\n Log.write(enterlog)\r\n Log2 = open(x3, 'w')\r\n Log2.write(ID+'\\n')\r\n #api.update_status(status= ReTweet)\r\n api.retweet(id = ID)\r\n api.create_favorite(id=ID, include_entities = True)\r\n #randomize the time for sleep 1.5mins to 5 mins\r\n rant = random.randrange(tMin, tMax, tStep)\r\n time.sleep(rant)", "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def put(self):\n if 'file' not in self.request.POST:\n self.request.errors.add('body', 'file', 'Not Found')\n self.request.errors.status = 404\n return\n tender = TenderDocument.load(self.db, self.tender_id)\n if not tender:\n self.request.errors.add('url', 'tender_id', 'Not Found')\n self.request.errors.status = 404\n return\n data = self.request.POST['file']\n bids = [i for i in tender.bids if i.id == self.bid_id]\n if not bids:\n self.request.errors.add('url', 'bid_id', 'Not Found')\n self.request.errors.status = 404\n return\n bid = bids[0]\n documents = [i for i in bid.documents if i.id == self.request.matchdict['id']]\n if not documents:\n self.request.errors.add('url', 'id', 'Not Found')\n self.request.errors.status = 404\n return\n src = tender.serialize(\"plain\")\n document = Document()\n document.id = self.request.matchdict['id']\n document.title = data.filename\n document.format = data.type\n document.datePublished = documents[0].datePublished\n key = uuid4().hex\n document.url = self.request.route_url('Tender Bid Documents', tender_id=self.tender_id, bid_id=self.bid_id, id=document.id, _query={'download': key})\n bid.documents.append(document)\n filename = \"{}_{}\".format(document.id, key)\n tender['_attachments'][filename] = {\n \"content_type\": data.type,\n \"data\": b64encode(data.file.read())\n }\n patch = 
make_patch(tender.serialize(\"plain\"), src).patch\n tender.revisions.append(revision({'changes': patch}))\n try:\n tender.store(self.db)\n except Exception, e:\n return self.request.errors.add('body', 'data', str(e))\n return {'data': document.serialize(\"view\")}", "def save(self, filename, format = \"text\"):\n #\n for time in self.mdvtc.keys():\n if format == \"csv\":\n save_filename = filename + str(int(time)) + \".csv\"\n elif format == \"text\":\n save_filename = filename + str(int(time)) + \".txt\"\n else:\n save_filename = filename + str(int(time)) + \".txt\"\n self.mdvtc[time].save(save_filename, format)", "def serialize_dirty(self):\n pass", "def save(self, output, data):", "def record(self, backend, job_id, status):\n self.file.write(\"%s;%d;%r\\n\" % (backend, job_id, status))\n self.file.flush()" ]
[ "0.6658211", "0.6079574", "0.6022582", "0.5911571", "0.58289623", "0.57782304", "0.57550776", "0.57268596", "0.5723704", "0.5721074", "0.57111156", "0.56885713", "0.5671371", "0.5641506", "0.56221664", "0.5615904", "0.5552578", "0.5543077", "0.55106515", "0.55028576", "0.54943603", "0.549327", "0.5478818", "0.54714483", "0.54417634", "0.54250646", "0.5414281", "0.5397367", "0.5395642", "0.5361713", "0.5348247", "0.53443944", "0.53404266", "0.5334647", "0.5330042", "0.53221625", "0.53188753", "0.5317922", "0.53173465", "0.5316704", "0.53148043", "0.5297409", "0.5286405", "0.527339", "0.52678376", "0.5267804", "0.5262293", "0.5257076", "0.5240381", "0.5231229", "0.5229069", "0.5221237", "0.5215941", "0.52155286", "0.5206265", "0.5204232", "0.52001584", "0.5199457", "0.5195414", "0.518156", "0.51785445", "0.5172184", "0.51690733", "0.516402", "0.5161008", "0.5147322", "0.51458013", "0.5143916", "0.5135175", "0.5134935", "0.51304686", "0.51267856", "0.5126336", "0.5112576", "0.5111534", "0.5111534", "0.5110328", "0.5107936", "0.51071596", "0.51045924", "0.51016796", "0.5099595", "0.509629", "0.5092953", "0.5092429", "0.5091644", "0.5087585", "0.5082913", "0.50769836", "0.5074427", "0.5070802", "0.50680673", "0.50677335", "0.5066004", "0.5063395", "0.50608397", "0.50596875", "0.50561804", "0.50514483", "0.504649" ]
0.7087227
0
post the tweet with a media and text
def __post_status(self, text, media_id):
    params = {
        "status": text,
        "media_ids": ",".join(map(str, [media_id]))
    }

    response = self.session.post(STATUS_UPDATE_URL, data=params)
    res_err(response, "POSTING THE TWEET AFTER MEDIA UPLOAD")

    logging.info(f'posted {text}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_tweet(media_id, quote, movie_name):\n\n tweet = '{} - {}'.format(quote, movie_name)\n request_data = {\n 'status': tweet,\n 'media_ids': media_id\n }\n\n requests.post(url=constants.POST_TWEET_URL, data=request_data, auth=oauth)", "def post_to_tweets(data, url):\n\n print(\"here,\", url)\n\n albums = find_all_images(data['content'])\n text = strip_text(data['content'])\n\n \"\"\"Where applicable, the images are associated with the text. This means, that to make an appropriate thread the\n conversion from a post to tweets should take into account how words relate to images in a spacial way. For this\n reason, we convert to tweets in batches.\"\"\"\n\n cfg = get_keys() # grab keys\n api = get_api(cfg) # setup API\n in_reply_to = None\n twitter_url = 'https://twitter.com'\n\n # for idx, caption in enumerate(text):\n # if idx > 0:\n # url_img = None\n # caption = re.findall(r\"[\\w']+|[.!?;]\\ \", caption)\n # text[idx] = text_to_tweets(caption, url_img)\n\n try:\n if data['in_reply_to'] is not None: # if post is reply ...\n for reply in data['in_reply_to']:\n if reply[:len(twitter_url)] == twitter_url: # if the URL points to twitter ...\n in_reply_to = reply.split('/')[-1:] # ... get the status id\n except KeyError:\n pass\n\n url = 'https://' + DOMAIN_NAME + url\n\n tweets = text_to_tweets(text, url=url) # process string into tweet thread\n\n # try and parse a lat lng.\n try:\n lat, lng = data['geo'][4:].split(\",\")\n except KeyError:\n lat, lng = None, None\n\n # post the first tweet so that we have a status id to start the thread\n status = api.update_status(status=tweets[0].pop(0), in_reply_to_status_id=in_reply_to)\n first_id = status.id # the id which points to origin of thread\n\n for album_group in text:\n try:\n media = album_group.pop(0) # get the corresponding album\n for tweet in album_group:\n status = api.update_with_media(filename=media, status=tweet, in_reply_to_status_id=status.id, lat=lat, long=lng)\n media = None\n except IndexError: # if we're out of albums...\n pass\n return 'http://twitter.com/{name}/status/{id}'.format(name=status.user.screen_name, id=first_id, lat=lat, lng=lng)", "def tweet(self, twitter_post, instruction):\n if instruction is None:\n logging.error('Instruction parameter missing')\n return TwitterResponse(description='Instruction parameter missing')\n\n if instruction == Instruction.PROCESS_WEATHER_DATA:\n twit_content = \"{}, {} {} C {}\".format(twitter_post.post_text, twitter_post.condition, twitter_post.temperature,\n twitter_post.youtube_url)\n if instruction == Instruction.PROCESS_ARTIST:\n twit_content = \"Requested: {} {}\".format(twitter_post.post_text, twitter_post.youtube_url)\n\n if instruction == Instruction.PROCESS_INSTAGRAM_POST:\n twit_content = twitter_post.post_text\n\n if twitter_post.post_text is None or twitter_post.youtube_url is None:\n return TwitterResponse(description='Twitter post text or youtube_url not resolved!')\n try:\n status = self.api.PostUpdate(twit_content)\n logging.info('Posted twit with status: %s', status)\n return TwitterResponse(status)\n except TwitterError as e:\n logging.error('Error posting twit: %s', e.message[0]['message'])\n return TwitterResponse(description='Fatal error while posting tweet')", "def send_tweet(tweet_text, uuid):\n try:\n query = {} # API call to twitter-service\n query['app_name'] = 'webcamd'\n query['uuid'] = uuid\n query['tweet_text'] = tweet_text\n query['hashtag_arg'] = 'metminiwx' # do not supply the #\n query['lat'] = 51.4151 # FIXME - put in definitions.py 
Stockcross\n query['lon'] = -1.3776 # Stockcross\n\n status_code, response_dict = cumulus_comms.call_rest_api(get_env.get_twitter_service_endpoint() + '/send_text', query)\n\n if response_dict['status'] == 'OK' :\n tweet_len = response_dict['tweet_len'].__str__()\n print('Tweet sent OK, tweet_len=' + tweet_len + ', uuid=' + uuid.__str__())\n else:\n print(response_dict['status'])\n\n except Exception as e:\n print('Error : send_tweet() : ' + e.__str__())", "def tweet(self):\n self.__refresh_local_tweets()\n\n if not self.tweets:\n return\n\n tweet_obj = self.tweets[0]\n\n # upload picture\n media_id = self.__upload_media(tweet_obj[\"img\"])\n\n # tweet with text, and image\n if not media_id:\n return\n self.__post_status(tweet_obj[\"text\"], media_id)\n\n self.tweets.remove(tweet_obj)\n self.tweeted.append(tweet_obj)\n self.__update_local_tweets()", "def send_tweet(tweet_text):\n twitter.update_status(status = tweet_text)", "def send_tweet(data):\n # Fill in the values noted in previous step here\n cfg = get_keys() # grab keys\n api = get_api(cfg) # setup API\n in_reply_to = None\n twitter_url = 'https://twitter.com'\n if data['in_reply_to'] is not None: # if post is reply ...\n for reply in data['in_reply_to']:\n if reply[:len(twitter_url)] == twitter_url: # if the URL points to twitter ...\n in_reply_to = reply.split('/')[-1:] # ... get the status id\n url = 'https://' + DOMAIN_NAME + data['url']\n tweets = post_to_tweets(data=data['content'], url=url) # process string into tweet thread\n # post the first tweet so that we have a status id to start the thread\n status = api.update_status(status=tweets.pop(0), in_reply_to_status_id=in_reply_to)\n first_id = status.id # the id which points to origin of thread\n try:\n lat,lng = data['geo'][4:].split(\",\")\n except KeyError:\n lat, lng = None, None\n for tweet in tweets:\n status = api.update_status(status=tweet, in_reply_to_status_id=status.id)\n return 'http://twitter.com/{name}/status/{id}'.format(name=status.user.screen_name, id=first_id, lat=lat, lng=lng)", "def sending_process(self, quote, movie_poster_url, movie_name):\n file_name, file_type = download_poster(movie_poster_url)\n media_upload = async_upload.VideoTweet(file_name, oauth)\n media_id = media_upload.upload_init(file_type)\n media_upload.upload_append()\n try:\n media_upload.upload_finalize()\n except Exception as e:\n print('Media Upload Error', str(e))\n return self.run()\n\n send_tweet(media_id, quote, movie_name)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def _upload_to_twitter(self):\n if self._twitter:\n strip_file = self.create_strip(resolution_ratio=0.5)\n f = open(strip_file)\n self._twitter.request('statuses/update_with_media', {'status': self._twitter_text}, {'media[]': f.read()})\n f.close()\n os.remove(strip_file)", "def post_tweet():\n if not request.get_json() or 'tweet' not in request.get_json():\n raise exceptions.HttpError(message=\"No tweet info in body\")\n\n post_tweet = Tweet(\n id=None,\n name=None,\n tweet=request.get_json()[\"tweet\"],\n created_at=None,\n type = 'original'\n )\n\n tweet = Storage.post_tweet(tweet=post_tweet)\n return jsonify(tweet.to_dict()), 201", "def add_tweet():\n if not request.json or 'author_id' not in request.json or 'text' not in request.json:\n abort(400)\n\n db = get_db()\n\n author_id = request.json.get('author_id')\n text = 
request.json.get('text')\n pub_date = int(time.time())\n\n db.execute('''insert into message (author_id, text, pub_date) values (?, ?, ?)''', (author_id, text, pub_date))\n db.commit()\n flash('Message recorded succesfully')\n message = {\"author_id\": author_id, \"text\": text, \"pub_date\": pub_date}\n return jsonify({'message': message}), 201", "def upload(media, media_data, *, additional_owners=_ELIDE,\n media_category=_ELIDE):\n binding = {'media': media, 'media_data': media_data, 'additional_owners':\n additional_owners, 'media_category': media_category}\n url = 'https://upload.twitter.com/1.1/media/upload.json'\n return _TwitterRequest('POST',\n url,\n 'rest:media',\n 'post-media-upload',\n binding)", "def command_tweet(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n tweet = ext.get_last_tweet(self.config['twitter'])\n\n for url in tweet.get('images', []):\n self.send_photo_url(bot, update, url)\n\n messages = [\n u'{text}',\n '[@{user[screen_name]}](https://twitter.com/{user[screen_name]}) '\n '- {ago}'\n ]\n\n for msg in messages:\n self.send_message(bot, update, msg.format(**tweet))", "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def send_tweet_with_video(tweet_text, filename, uuid):\n try:\n query = {} # API call to twitter-service\n query['app_name'] = 'webcamd'\n query['uuid'] = uuid\n query['tweet_text'] = tweet_text\n query['hashtag_arg'] = 'metminiwx' # do not supply the #\n query['lat'] = 51.4151 # Stockcross\n query['lon'] = -1.3776 # Stockcross\n query['video_pathname'] = filename\n\n status_code, response_dict = cumulus_comms.call_rest_api(get_env.get_twitter_service_endpoint() + '/send_video', query)\n\n # print('status_code=' + status_code.__str__())\n # pprint(response_dict)\n # if response_dict['status'] == 'OK' and response_dict['tweet_sent'] == True:\n if response_dict['status'] == 'OK' :\n tweet_len = response_dict['tweet_len'].__str__()\n print('Tweet sent OK, tweet_len=' + tweet_len + ', uuid=' + uuid.__str__())\n else:\n print(response_dict['status'])\n\n except Exception as e:\n print('Error : send_tweet_with_video() : ' + e.__str__())", "def post_tweet(instance, created, raw, **kwargs):\n if created and not raw:\n _twitter.statuses.update(status='%s %s' % (\n instance.title,\n instance.url,\n ))", "def post_tweet(self, message):\n twitter = TwitterAPI(\n # os.environ[\"consumerKey\"],\n # os.environ[\"consumerSecret\"],\n # os.environ[\"accessToken\"],\n # os.environ[\"accessTokenSecret\"],\n self.twitter_creds[\"consumerKey\"],\n self.twitter_creds[\"consumerSecret\"],\n self.twitter_creds[\"accessToken\"],\n self.twitter_creds[\"accessTokenSecret\"],\n )\n\n request = twitter.request(\"statuses/update\", {\"status\": message})\n\n status_code = request.status_code\n if status_code == 200:\n rootLogger.info(\"Successfully tweeted: {}\".format(message))\n else:\n rootLogger.error(\"HTTP status code: {} -- unsuccessfully tweeted: {}\".format(status_code, message))", "def twitter_text(\n self,\n text: str,\n urls: List[Dict[str, str]],\n user_mentions: List[Dict[str, Any]],\n media: List[Dict[str, Any]],\n ) -> Element:\n\n to_process: List[Dict[str, Any]] = []\n # Build dicts for URLs\n for url_data in urls:\n to_process.extend(\n {\n \"type\": \"url\",\n 
\"start\": match.start(),\n \"end\": match.end(),\n \"url\": url_data[\"url\"],\n \"text\": url_data[\"expanded_url\"],\n }\n for match in re.finditer(re.escape(url_data[\"url\"]), text, re.IGNORECASE)\n )\n # Build dicts for mentions\n for user_mention in user_mentions:\n screen_name = user_mention[\"screen_name\"]\n mention_string = \"@\" + screen_name\n to_process.extend(\n {\n \"type\": \"mention\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": \"https://twitter.com/\" + urllib.parse.quote(screen_name),\n \"text\": mention_string,\n }\n for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE)\n )\n # Build dicts for media\n for media_item in media:\n short_url = media_item[\"url\"]\n expanded_url = media_item[\"expanded_url\"]\n to_process.extend(\n {\n \"type\": \"media\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": short_url,\n \"text\": expanded_url,\n }\n for match in re.finditer(re.escape(short_url), text, re.IGNORECASE)\n )\n # Build dicts for emojis\n for match in POSSIBLE_EMOJI_RE.finditer(text):\n orig_syntax = match.group(\"syntax\")\n codepoint = emoji_to_hex_codepoint(unqualify_emoji(orig_syntax))\n if codepoint in codepoint_to_name:\n display_string = \":\" + codepoint_to_name[codepoint] + \":\"\n to_process.append(\n {\n \"type\": \"emoji\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"codepoint\": codepoint,\n \"title\": display_string,\n }\n )\n\n to_process.sort(key=lambda x: x[\"start\"])\n p = current_node = Element(\"p\")\n\n def set_text(text: str) -> None:\n \"\"\"\n Helper to set the text or the tail of the current_node\n \"\"\"\n if current_node == p:\n current_node.text = text\n else:\n current_node.tail = text\n\n db_data: Optional[DbData] = self.zmd.zulip_db_data\n current_index = 0\n for item in to_process:\n # The text we want to link starts in already linked text skip it\n if item[\"start\"] < current_index:\n continue\n # Add text from the end of last link to the start of the current\n # link\n set_text(text[current_index : item[\"start\"]])\n current_index = item[\"end\"]\n if item[\"type\"] != \"emoji\":\n elem = url_to_a(db_data, item[\"url\"], item[\"text\"])\n assert isinstance(elem, Element)\n else:\n elem = make_emoji(item[\"codepoint\"], item[\"title\"])\n current_node = elem\n p.append(elem)\n\n # Add any unused text\n set_text(text[current_index:])\n return p", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def tweet(api, message):\n status = api.PostUpdate(message)", "def publish_tweet(self, tweet):\n\n self.api.update_status(tweet)", "def tweet(self, message: str) -> None:\n\n # YOUR CODE HERE\n tweet = Tweet(self.userid, date.today(), message)\n self.tweets.append(tweet)", "def tweet(self, message):\n if self.api_key == \"\":\n return False\n ts = TagStripper()\n ts.feed(message)\n params = urllib.urlencode(\n {'api_key': self.api_key,\n 'status': ts.get_collected_data()})\n\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\"}\n\n conn = httplib.HTTPConnection(\"api.thingspeak.com:80\")\n\n try:\n conn.request(\"POST\",\n \"/apps/thingtweet/1/statuses/update\",\n params,\n headers)\n\n response = conn.getresponse()\n data = response.read()\n conn.close()\n return True, ts.get_collected_data()\n except httplib.HTTPException as http_exception:\n return False, http_exception.message", "def tweet_it(string, credentials, image=None):\n if len(string) <= 0:\n 
return\n\n # Create and authorise an app with (read and) write access at:\n # https://dev.twitter.com/apps/new\n # Store credentials in YAML file\n auth = twitter.OAuth(\n credentials[\"access_token\"],\n credentials[\"access_token_secret\"],\n credentials[\"consumer_key\"],\n credentials[\"consumer_secret\"],\n )\n t = twitter.Twitter(auth=auth)\n\n print(\"TWEETING THIS:\\n\" + string)\n\n if args.test:\n print(\"(Test mode, not actually tweeting)\")\n else:\n\n if image:\n print(\"Upload image\")\n\n # Send images along with your tweets.\n # First just read images from the web or from files the regular way\n with open(image, \"rb\") as imagefile:\n imagedata = imagefile.read()\n t_up = twitter.Twitter(domain=\"upload.twitter.com\", auth=auth)\n id_img = t_up.media.upload(media=imagedata)[\"media_id_string\"]\n\n result = t.statuses.update(status=string, media_ids=id_img)\n else:\n result = t.statuses.update(status=string)\n\n url = (\n \"http://twitter.com/\"\n + result[\"user\"][\"screen_name\"]\n + \"/status/\"\n + result[\"id_str\"]\n )\n print(\"Tweeted:\\n\" + url)\n if not args.no_web:\n webbrowser.open(url, new=2) # 2 = open in a new tab, if possible", "def metadata_create():\n binding = {}\n url = 'https://upload.twitter.com/1.1/media/metadata/create.json'\n return _TwitterRequest('POST',\n url,\n 'rest:media',\n 'post-media-metadata-create',\n binding)", "def send_tweet(self):\n \n ## Check the quality/score\n quality = self.sunsetwx_response['features'][0]['properties']['quality']\n score = self.sunsetwx_response['features'][0]['properties']['quality_percent']\n \n ## For great ones... compose a status\n if quality == 'Great':\n \n local_time_str = self.time_converted.strftime(\"%I:%M %p\")\n if self.type == 'sunrise':\n time_of_day_str = 'tomorrow morning'\n elif self.type == 'sunset':\n time_of_day_str = 'this evening'\n status = f'Looks like there will be a great {self.type} in {self.location} {time_of_day_str}! Check it out at {local_time_str}.'\n \n ## Post about the great ones\n api.update_status(status=status)\n \n ## Update the log regardless\n self.update_log_record(datetime.today().strftime(\"%Y-%m-%d\"), score)", "def post_to_twitter(sender, instance, *args, **kwargs):\n\n # avoid to post the same object twice\n if not kwargs.get('created'):\n return False\n\n # check if there's a twitter account configured\n import tweepy\n try:\n consumer_key = os.environ.get('TWITTER_CONSUMER_KEY')\n consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET')\n access_key = os.environ.get('TWITTER_ACCESS_KEY')\n access_secret = os.environ.get('TWITTER_ACCESS_SECRET')\n except AttributeError:\n print 'WARNING: Twitter account not configured.'\n return False\n\n # create the twitter message\n try:\n text = instance.get_twitter_message()\n except AttributeError:\n text = unicode(instance)\n\n mesg = u'%s' % (text)\n if len(mesg) > TWITTER_MAXLENGTH:\n size = len(mesg + '...') - TWITTER_MAXLENGTH\n mesg = u'%s...' 
% (text[:-size])\n\n try:\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n api.update_status(mesg)\n except urllib2.HTTPError, ex:\n print 'ERROR:', str(ex)\n return False", "def post(self, request, *args, **kwargs):\n form = self.form_class(data=request.POST, user=request.user)\n\n if form.is_valid():\n instance = form.save()\n\n # Deal with posting twitter status\n posted_status = False\n if instance.category != get_default_category():\n # Create tweeted message and post status\n tweeted_message = \"RT:<%s>%s#Custom#%s\" % (instance.created_by.username, instance.message, instance.category)\n api = twitter.Api(consumer_key=settings.TWITTER_CONSUMER_KEY, consumer_secret=settings.TWITTER_CONSUMER_SECRET,\n access_token_key=settings.TWITTER_USER_OAUTH_TOKEN, access_token_secret=settings.TWITTER_USER_OAUTH_TOKEN_SECRET)\n api.PostUpdate(status=tweeted_message)\n\n # Save time published and update postedStatus\n instance.published_on = timezone.now()\n instance.save()\n posteds_status = True\n\n return render(request, 'tweets/index.html', {'form': self.form_class(user=request.user), 'success': True, 'posted_status': posted_status})\n return render(request, 'tweets/index.html', {'form': form})", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n print(e.message)\n else:\n log(\"Tweeted: \" + text)\n print(\"Tweeted: \" + text)", "def sendTweets(self):\n\n if self.__status_type == 'link':\n\n for index, item in self.list.iterrows():\n\n title = item['title']\n url = item['url']\n message = (url + \" \" + title)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'single_msg':\n\n message = (self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'reply':\n\n for index, item in self.list.iterrows():\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n try:\n if self.__image == None:\n self.__api.update_status(status=message, in_reply_to_status_id=item['id'])\n else:\n self.__api.update_with_media(filename=self.__image, status=message,\n in_reply_to_status_id=item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"reply status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'at':\n\n for index, item in self.list.iterrows():\n\n try:\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'rt':\n\n for index, item in self.list.iterrows():\n try:\n self.__api.retweet(item['id'])\n except KeyError:\n 
print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n else:\n print(\"Invalid status type. Change status type through configure_tweet method.\")\n\n return", "def status_update(status, n_pha, media):\r\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\r\n auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\r\n api = tweepy.API(auth)\r\n\r\n message = MESSAGE_TEMPLATE.format(n_pha)\r\n if media:\r\n api.update_with_media(media, status=message)\r\n else:\r\n api.update_status(message)\r\n return \"Tweet Published\"", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n else:\n log(\"Tweeted: \" + text)", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": \"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def tweet_text(tweet):\n return tweet['text']", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ 
t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "async def tweet_menu(self, ctx, post_list: list,\n message: discord.Message=None,\n page=0, timeout: int=30):\n s = post_list[page]\n colour =\\\n ''.join([randchoice('0123456789ABCDEF')\n for x in range(6)])\n colour = int(colour, 16)\n created_at = s.created_at\n created_at = created_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n post_url =\\\n \"https://twitter.com/{}/status/{}\".format(s.user.screen_name, s.id)\n desc = \"Created at: {}\".format(created_at)\n em = discord.Embed(title=\"Tweet by {}\".format(s.user.name),\n colour=discord.Colour(value=colour),\n url=post_url,\n description=desc)\n em.add_field(name=\"Text\", value=s.text)\n em.add_field(name=\"Retweet count\", value=str(s.retweet_count))\n if hasattr(s, \"extended_entities\"):\n em.set_image(url=s.extended_entities[\"media\"][0][\"media_url\"] + \":thumb\")\n if not message:\n message =\\\n await self.bot.send_message(ctx.message.channel, embed=em)\n await self.bot.add_reaction(message, \"⬅\")\n await self.bot.add_reaction(message, \"❌\")\n await self.bot.add_reaction(message, \"➡\")\n else:\n message = await self.bot.edit_message(message, embed=em)\n react = await self.bot.wait_for_reaction(\n message=message, user=ctx.message.author, timeout=timeout,\n emoji=[\"➡\", \"⬅\", \"❌\"]\n )\n if react is None:\n await self.bot.remove_reaction(message, \"⬅\", self.bot.user)\n await self.bot.remove_reaction(message, \"❌\", self.bot.user)\n await self.bot.remove_reaction(message, \"➡\", self.bot.user)\n return None\n reacts = {v: k for k, v in numbs.items()}\n react = reacts[react.reaction.emoji]\n if react == \"next\":\n next_page = 0\n if page == len(post_list) - 1:\n next_page = 0 # Loop around to the first item\n else:\n next_page = page + 1\n return await self.tweet_menu(ctx, post_list, message=message,\n page=next_page, timeout=timeout)\n elif react == \"back\":\n next_page = 0\n if page == 0:\n next_page = len(post_list) - 1 # Loop around to the last item\n else:\n next_page = page - 1\n return await self.tweet_menu(ctx, post_list, message=message,\n page=next_page, timeout=timeout)\n else:\n return await\\\n self.bot.delete_message(message)", "def on_tweet(self, tweet):\n pass", "def _submit_media(\n self, *, data: dict[Any, Any], timeout: int, websocket_url: str | None = None\n ):\n connection = None\n if websocket_url is not None:\n try:\n connection = websocket.create_connection(websocket_url, timeout=timeout)\n except (\n OSError,\n websocket.WebSocketException,\n BlockingIOError,\n ) as ws_exception:\n msg = \"Error establishing websocket connection.\"\n raise WebSocketException(msg, ws_exception) from None\n\n self._reddit.post(API_PATH[\"submit\"], data=data)\n\n if connection is None:\n return None\n\n try:\n ws_update = loads(connection.recv())\n connection.close()\n except (OSError, websocket.WebSocketException, BlockingIOError) as ws_exception:\n msg = \"Websocket error. Check your media file. 
Your post may still have been created.\"\n raise WebSocketException(\n msg,\n ws_exception,\n ) from None\n if ws_update.get(\"type\") == \"failed\":\n raise MediaPostFailed\n url = ws_update[\"payload\"][\"redirect\"]\n return self._reddit.submission(url=url)", "def post_msg(text):\n client = WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(\n channel=os.environ[\"SLACK_CHANNEL\"],\n text=\"News\",\n blocks=[\n {\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": (text)}}],\n )\n return text", "def new_tweet(filename=None, status=None):\n if filename is None:\n filename, comment = create_content()\n if status is None:\n status = comment\n\n with open(\"api_key.txt\") as f:\n api_data = f.readline().split(';')\n twitter = Twython(*api_data)\n\n video = open('./animations/{}.mp4'.format(filename), 'rb')\n response = twitter.upload_video(media=video, media_type='video/mp4')\n twitter.update_status(status=status, media_ids=[response['media_id']])", "def tweet(chopped_string):\n\n status = api.PostUpdate(chopped_string)\n print status.text", "def add_tweet():\r\n tweet = models.Tweet(text_content=request.json['content'], username=request.json['username'],\r\n timestamp=datetime.datetime.now())\r\n db.session.add(tweet)\r\n db.session.commit()\r\n\r\n return {'id': tweet.id}", "def tweet(msg):\r\n m = \"\\n{}\\n\".format(msg)\r\n arcpy.AddMessage(m)\r\n print(m)\r\n print(arcpy.GetMessages())", "def tweet(msg):\n m = \"\\n{}\\n\".format(msg)\n arcpy.AddMessage(m)\n print(m)\n print(arcpy.GetMessages())", "def post_to_twitter(sender, instance, **kwargs):\n\n if instance.pk: #only post the tweet if it's a new record. \n return False \n \n accounts = TwitterAccount.objects.all()\n \n for account in accounts:\n bittle = Bittle.objects.bitlify(instance.get_absolute_url())\n mesg = \"%s: %s\" % (\"New Blog Post\", bittle.shortUrl)\n username = account.username\n password = account.get_password()\n try:\n twitter_api = twitter.Api(username, password)\n twitter_api.PostUpdate(mesg)\n except urllib2.HttpError, ex:\n print str(ex)\n return False", "def tweet(self, test_mode: bool = False):\n\n if not test_mode and not self.is_new_data():\n logger.info(\"No updates yet\")\n return\n\n # Create Tweet\n status_text = (\n \"In Deutschland sind {} Menschen ({}%) geimpft.\\n\"\n \"Davon {} ({}%) vollständig mit zwei Impfungen.\\n\"\n \"Impfungen/Tag: {} (Median über letzten 7 Meldetage)\\n\"\n \"Stand: {}.\".format(\n locale.format_string(\n \"%d\", self._stats.vacc_first, grouping=True, monetary=True\n ),\n locale.format_string(\n \"%.2f\",\n self._stats.vacc_quote_first * 100,\n grouping=True,\n monetary=True,\n ),\n locale.format_string(\n \"%d\", self._stats.vacc_both, grouping=True, monetary=True\n ),\n locale.format_string(\n \"%.2f\",\n self._stats.vacc_quote_complete * 100,\n grouping=True,\n monetary=True,\n ),\n locale.format_string(\n \"%d\", self._stats.vacc_median, grouping=True, monetary=True\n ),\n self._stats.date.strftime(\"%d.%m.%Y\"),\n )\n )\n logger.info(status_text)\n\n self.create_image()\n\n if not test_mode:\n media = self._api.media_upload(TweetBot.IMAGE)\n self._api.update_status(\n status=status_text,\n lat=52.53988938917128,\n long=13.34704871422069,\n media_ids=[media.media_id],\n )\n logger.info(\"Tweeted\")\n else:\n logger.info(\"Test mode. 
Nothing tweeted\")", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "async def make_tweet(tweet: str = Query(...),\n # attachment_url: Optional[str] = Query(None, alias=\"link of tweet to quote\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"),\n # in_reply_to: Optional[int] = Query(None, alias=\"link of tweet to reply to\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"), \n user: User = Depends(get_current_user),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to make tweets\")\n # if in_reply_to:\n # regex = re.match(\"https://twitter.com/(?P<username>[\\w]+)/status/(?P<id>[\\d]+)\", in_reply_to)\n # status_id = regex.group(\"id\")\n url = \"https://api.twitter.com/1.1/statuses/update.json\"\n params = dict(status=tweet,\n # attachment_url=attachment_url,\n # in_reply_to_status_id=status_id,\n )\n auth = user.get_oauth1_token()\n\n r = requests.post(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n tweet = r.json()\n\n new_tweet = Tweet(**tweet)\n user.tweets.append(new_tweet)\n user.requests_made += 1\n\n session.commit()\n return tweet", "def text_to_tweets(text, url):\n max_chars = 240 - 1 - 23 # one removed for punctuation 22 removed for link.\n tweets = [] # buffer of tweets to send\n tweet = \"\" # the current tweet we are composing\n while len(text) > 0: # while we still have text ...\n try:\n while len(tweet) + len(text[0]) + 1 < max_chars:\n # as long as the composed tweet is one less than the character limit\n phrase = text.pop(0)\n if phrase not in [\"? \", \". \", \"! \"]: # If the next piece of text is not punctuation ...\n tweet += \" \" # ... Add a space\n tweet += phrase # and add the text\n else:\n tweet += phrase[0]\n\n # if the net character is a punctuation mark\n if text[0] in [\"? \", \". \"]: # if the next char is a punctuation mark\n\n tweet += text.pop(0)[0] # add it to the end of the tweet\n else:\n tweet += u'…' # otherwise '...'\n except IndexError:\n print(\"INDEX ERROR\") # ... 
something went wrong ...\n\n if len(tweets) == 0 and url is not None:\n # If there are presently no tweets we need to add the blog link to the post\n # This tells someone where to see your posts.\n max_chars = 240 - 1 # we can now use more characters.\n tweet += \" \" + url #\n\n tweets.append(tweet)\n tweet = \"\"\n\n return tweets", "def post_video(self, comment):\n\t\tpass", "def send_tweet(auth, tweet, in_reply_to=None):\n\tif isinstance(tweet, list):\n\t\t# It's a thread of tweets\n\t\tprev = ret = None\n\t\tfor part in tweet:\n\t\t\tif not part: continue\n\t\t\tinfo = send_tweet(auth, part, in_reply_to=prev)\n\t\t\tif \"error\" in info: return info\n\t\t\tif not ret: ret = info # Return the info for the *first* tweet sent\n\t\t\tprev = info[\"tweet_id\"]\n\t\treturn ret or {\"error\": \"Can't send a thread of nothing but empty tweets\"}\n\ttwitter = OAuth1Session(config.TWITTER_CLIENT_ID, config.TWITTER_CLIENT_SECRET, auth[0], auth[1])\n\tresp = twitter.post(\"https://api.twitter.com/1.1/statuses/update.json\",\n\t\tdata={\"status\": tweet, \"in_reply_to_status_id\": in_reply_to})\n\tif resp.status_code != 200:\n\t\tprint(\"Unknown response from Twitter\")\n\t\tprint(resp.status_code)\n\t\tprint(\"---\")\n\t\tprint(resp.json())\n\t\tprint(\"---\")\n\t\ttry:\n\t\t\t# TODO: Report these to the front end somehow even if asynchronous\n\t\t\treturn {\"error\": \"Unable to send tweet: \" + resp.json()[\"errors\"][0][\"message\"]}\n\t\texcept LookupError:\n\t\t\treturn {\"error\": \"Unknown error response from Twitter (see server console)\"}\n\tr = resp.json()\n\turl = \"https://twitter.com/%s/status/%s\" % (r[\"user\"][\"screen_name\"], r[\"id_str\"])\n\treturn {\"screen_name\": r[\"user\"][\"screen_name\"], \"tweet_id\": r[\"id\"], \"url\": url}", "def bridgy_twitter(location):\n r = send_mention(\n 'http://' + DOMAIN_NAME +'/e/' + location,\n 'https://brid.gy/publish/twitter',\n endpoint='https://brid.gy/publish/webmention'\n )\n location = 'http://' + DOMAIN_NAME +'/e/' + location\n syndication = r.json()\n app.logger.info(syndication)\n data = get_bare_file('data/' + location.split('/e/')[1]+\".md\")\n if data['syndication'] == 'None':\n data['syndication'] = syndication['url']+\",\"\n else:\n data['syndication'] += syndication['url']+\",\"\n entry_re_write(data)", "def tweet_btn_clicked(self,widget, data=None):\n tweet_text = self.get_text(\"txt_tweet\") \n \n #double check the length and go.\n if (len(tweet_text) <= 140): \n self.twitter.UpdateStatus(tweet_text) \n status_label = self.builder.get_object(\"status_lbl\")\n #clear the text box and update the status\n self.builder.get_object(\"txt_tweet\").set_text(\"\")\n my_tweet_bufffer = self.builder.get_object(\"personal_tweet_buffer\")\n iters = my_tweet_bufffer.get_end_iter()\n my_tweet_bufffer.insert(iters, \"%s\\n\\n\" % tweet_text)\n else:\n status_label = self.builder.get_object(\"status_lbl\")\n status_label.set_text(\"Too long: Tweet != Blog -__-\")\n print tweet_text", "def insert_tweet(status):\n status['replies'] = []\n return db.tweets.insert(status)", "def adapt_tweet(feedpost):\n tweet = feedpost['title']\n for action in (_make_links, _clean_name, urlize):\n tweet = action(tweet)\n feedpost['title'] = _get_tweet_number(feedpost['link'])\n feedpost['body'] = u'<p>%s</p>' % tweet\n return feedpost", "def tweet(api):\n logger.info(\"Tweeting content\")\n urls = get_article('the_onion.txt')\n lines = content_list(urls)\n\n for line in lines:\n try:\n api.update_status(line)\n logger.info(\"Tweeting!\")\n 
time.sleep(SECONDS)\n\n except tweepy.TweepError as err:\n logger.error(err)", "def tweet(message):\n auth = load_twitter_auth()\n key = environ['TWITTER_ACCESS_KEY']\n secret = environ['TWITTER_ACCESS_SECRET']\n auth.set_access_token(key, secret)\n api = tweepy.API(auth)\n api.update_status(message)\n print(message)", "def twitter(self, twitterData):\n # Create an array of PIL objects\n imgArray = []\n iconHeight = 120\n imgQR = self.qrIcon(twitterData['url'], size=iconHeight)\n imgTwit = Image.open(dirname(realpath(__file__)) + sep + pardir + sep + \n \"/artwork/SoMe/agata/twitter.png\").convert(\"1\")\n imgTwit = imgTwit.resize([iconHeight-2*4,iconHeight-2*4]) # QR has a border of 4\n #headTxt = \"%s @%s %s\\n%s\" % (twitterData['name'], twitterData['screen_name'], \n # [ \"retweeted\" if twitterData['retweet'] else \"tweeted\"][0], twitterData['created_at'][:-3])\n headTxt = \"%s %s\\n%s\" % (twitterData['name'], \n [ \"retweeted\" if twitterData['retweet'] else \"tweeted\"][0], twitterData['created_at'][:-3])\n imHeadTxtWidth = self.printerConf['printerWidth'] - 2*iconHeight - 2 - 12\n # Insert PIL w text\n imHeadTxt = self.imText(headTxt, txtWidth=imHeadTxtWidth)\n imHeader = self.imBox(self.printerConf['printerWidth'], \n [ imHeadTxt.size[1] if imHeadTxt.size[1] > iconHeight else iconHeight][0]+4+9)\n # Paste them together\n imHeader.paste(imgTwit,(0,4))\n imHeader.paste(imHeadTxt,(iconHeight+12,4))\n imHeader.paste(imgQR,(iconHeight+2+imHeadTxtWidth+2,0))\n imgArray.append(imHeader)\n imgArray.append(self.imText(twitterData['text']))\n # Add images\n for url in twitterData['urlPics']:\n try:\n url = urllib2.urlopen(url, timeout=10)\n f = StringIO()\n responseIO = StringIO(url.read())\n im = Image.open(responseIO).convert(\"1\")\n imgArray.append(self.imBox(self.printerConf['printerWidth'], 10))\n imgArray.append(im)\n imgArray.append(self.imBox(self.printerConf['printerWidth'], 10))\n except Exception, e:\n print(e)\n errorText = \"Hrmpf... Failed to download picture from Twitter at print time. See the log for details.\"\n imgArray.append(self.imText(errorText, bgColor=0, fontColor=255))\n imgArray.append(self.printLine())\n\n # print it \n imgMaster, height, imgData = self.combinePILObjects(imgArray) \n return (height, imgData, [0 if not self.printerConf['rotate'] else 1][0], \"image/png\")", "def set_text(self, tweet):\n\n if not tweet.has_key('text'):\n return\n\n text = tweet['text']\n\n # remove URIs\n text = re.sub(self.re_uri,\"\", text)\n # lower case string and remove non word characters\n text = re.sub(self.re_non_word, \" \", text.lower()).strip()\n\n self.text = text", "def on_data(self, data):\n\n t = json.loads(data) \n tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n }\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def tweet(user):\n api = get_api(user)\n msg = 'I used hackt to follow @hackerschool batches on twitter. 
You can too at http://bit.ly/hs_hackt'\n\n try:\n api.PostUpdate(msg)\n except twitter.TwitterError as error:\n return {'msg': error.message[0]['message']}", "def post_to_channel(self, text):\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=self.config.SLACK_CHANNEL,\n text=text,\n username='pybot',\n icon_emoji=':robot_face:'\n )", "def send_text_via_twitter(self, recipient, text=\"\"):\n\n if not self.twitter_api and text:\n return False\n recipient = self.sanitise_twitter_account(recipient)\n try:\n can_dm = self.twitter_api.exists_friendship(recipient, self.twitter_account)\n except tweepy.TweepError: # recipient not found\n return False\n if can_dm:\n chunks = self.break_to_chunks(text, TWITTER_MAX_CHARS)\n for c in chunks:\n try:\n # Note: send_direct_message() requires explicit kwargs (at least in tweepy 1.5)\n # See http://groups.google.com/group/tweepy/msg/790fcab8bc6affb5\n self.twitter_api.send_direct_message(screen_name=recipient, text=c)\n except tweepy.TweepError:\n s3_debug(\"Unable to Tweet DM\")\n else:\n prefix = \"@%s \" % recipient\n chunks = self.break_to_chunks(text, TWITTER_MAX_CHARS - len(prefix))\n for c in chunks:\n try:\n self.twitter_api.update_status(prefix + c)\n except tweepy.TweepError:\n s3_debug(\"Unable to Tweet @mention\")\n return True", "def post_to_twitter(worker_responses):\n for worker_response in worker_responses:\n print get_tweet_text(worker_response)", "def post_to_twitter(content) -> None:\n logging.error(f'No function registered to handle posting type: {type(content)} to twitter')\n pass", "def tweet_results(dl):\n\n TWT = twt.get_api()\n try:\n if(DEBUG):\n print(\"=====Mock Posting=====\")\n print(MSG.format(ISP, dl, ISP_DL))\n print(\"=====End=====\", flush=True)\n else:\n TWT.update_status(MSG.format(ISP, dl, ISP_DL))\n except:\n print(\"error posting to twitter\", flush=True)", "def upload_image_to_twitter(file_to_upload):\n oauth_client = get_3_legged_auth_client()\n\n image_bytes = file_to_upload.read()\n\n response = oauth_client.post(url=URL_TWITTER_MEDIA_UPLOAD,\n json={\n # 'command': 'INIT',\n 'media_type': 'image/jpeg',\n 'media': image_bytes,\n 'media_category': 'tweet_image'\n })\n\n # auth = tweepy.AppAuthHandler(consumer_key=CONSUMER_KEY,\n # consumer_secret=CONSUMER_SECRET)\n #\n # response = oauth_client.media_upload(filename=\"../assets/captain-its-wednesday.jpeg\",\n # file=file_to_upload,\n # media_category=\"tweet_image\")\n\n # tweepy.Client.create_tweet()\n\n # def upload_image_to_twitter(image_binary):\n # # auth = HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET)\n # # client = BackendApplicationClient(client_id=CONSUMER_KEY)\n # # oauth2 = OAuth2Session(client=client)\n # # token_response = oauth2.fetch_token(token_url=URL_OAUTH2_BEARER_TOKEN, auth=auth)\n # # bearer_token = token_response[\"access_token\"]\n # #\n # # response = requests.post(url=URL_TWITTER_MEDIA_UPLOAD,\n # # data={\n # # # 'command': 'INIT',\n # # 'media_type': 'image/jpeg',\n # # 'media': image_binary,\n # # 'media_category': 'tweet_image'\n # # },\n # # headers={\"Authorization\": f\"Bearer {bearer_token}\"}\n # # )\n #\n # # TODO try to do bearbones Basic Auth, this POST was not working correcting,\n # # username_password = f\"{CONSUMER_KEY}:{CONSUMER_SECRET}\"\n # # bytes_encoded = username_password.encode(\"utf-8\")\n # # base64_username_password = base64.b64encode(bytes_encoded)\n # #\n # # print(bytes_encoded)\n # # print(base64_username_password)\n # # asdf = requests.post(url=URL_OAUTH2_BEARER_TOKEN,\n # # 
data={\"grant_type\": \"client_credentials\"},\n # # headers={\"Authorization\": f\"Basic {base64_username_password}\"\n # # 'Content-Type': 'application/x-www-form-urlencoded'\n # # })\n\n logging.info(response.status_code)\n logging.info(response.json())", "def postTweet(self, userId: int, tweetId: int) -> None:\n ts = time.time()\n self.posts[userId].append((ts, tweetId))", "def draw_tweet(self, tweet_content):\n\n tweet = Text(self.twitter_frame, bg=\"white\", fg=\"black\", wrap=WORD, \n height=6, font=('arial', 10), pady=5, padx=10)\n\n tweet.insert(1.0, tweet_content)\n tweet.configure(state='disabled')\n tweet.pack(side=TOP)", "def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')", "def retweet(tweet):\n\n twitter.PostRetweet(tweet.id, trim_user=False)\n\n return", "def build_tweet():\n\n verb = conjugate(random.choice(verbs)['present'], tense=PARTICIPLE, parse=True).title()\n animal = random.choice(animals).title()\n food = random.choice(foods).title()\n noun = random.choice(nouns).title()\n\n band = food + \" \" + noun\n track = verb + \" \" + animal\n\n feature1 = clean_feature(random.choice(j['features']))\n feature2 = clean_feature(random.choice(j['features']))\n feature3 = random.choice(j['features'])\n\n dont = \"\"\n if random.randrange(100) <= 50:\n dont = \"don't \"\n\n s = \"I \" + dont + \"like the \" + feature1 + \" and \" + feature2 + \" with \" + \\\n feature3 + \" in \" + band + \"'s \" + '\"' + track + '\"'\n\n return s", "def tweet(self):\n library = os.path.join(os.path.dirname(__file__),\n \"..//libraries//reuse//\")\n \n ql = QuickList().open(os.path.join(library,\"pool.xls\"))\n \n ql.shuffle()\n \n for r in ql:\n file_loc = os.path.join(library,r[\"file_name\"])\n text = r[\"nice_title\"]\n tags = [\n r['title'],\n str(r['year']),\n \"culture reuse\" \n ]\n \n name, gif_url = self._upload_gif(file_loc)\n\n \n #embed_code = \"<img class='gfyitem' data-id='JoyfulCircularHamster' />\".format(gif_url)\n embed_code = \"<img class='gfyitem' data-id='{0}' />\".format(name)\n \n tumblr_text = embed_code + '<p>{0}</p><p><a href=\"{1}\">get from gfycat</a></p>'.format(text,gif_url)\n \n tumblr_link = self._tumblr(tumblr_text,tags=tags,keyword=name) #video_url=str(file_loc)\n if tumblr_link:\n text += \" {0}\".format(tumblr_link)\n tweets = self._tweet_video(text,file_loc)\n \n break\n \n return tweets", "def test_reply(self):\n tweet_object = self.load_tweet('reply')\n tweet_text = self.api.html_for_tweet(tweet_object)\n self.assertEqual(tweet_text,\n u'<span class=\"twython-tweet-prefix\"><a href=\"https://twitter.com/philgyford\" class=\"twython-mention\">@philgyford</a> </span>Here’s a test tweet that goes on as much as possible and includes an image. 
Hi to my fans in testland!<span class=\"twython-tweet-suffix\"> https://t.co/tzhyk2QWSr</span>')", "def post_video(self, url: str, text: str) -> bool:\n return False", "def enrich(self, tweet):\n tweet = urlize_tweet(expand_tweet_urls(tweet))\n # parses created_at \"Wed Aug 27 13:08:45 +0000 2008\"\n\n if settings.USE_TZ:\n tweet['datetime'] = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y').replace(tzinfo=timezone.utc)\n else:\n tweet['datetime'] = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n\n return tweet", "def post_attachment(self, message, emoji=None, color=None, fallback=None, markdown=True):\n if not emoji:\n emoji = self.cfg['slack_emoji']\n attachments = [{\"text\": message}]\n if color:\n attachments[0]['color'] = color\n if markdown:\n attachments[0]['mrkdwn_in'] = ['text']\n if fallback:\n attachments[0]['fallback'] = fallback\n else:\n # XXX: fallback should be a plaintext version stripped of markdown. For now we just post the same.\n attachments[0]['fallback'] = message\n\n response = self.slack.api_call(\n \"chat.postMessage\", channel=self.cfg['slack_channel'], attachments=attachments,\n username=self.cfg['slack_username'], icon_emoji=emoji\n )\n if 'ok' in response:\n return True\n logging.error(\"Error sending message: %s\", response['error'])\n return False", "def on_success(self, status):\n # print(status['text'], status['id'])\n if is_wcw(status): # Use function for testing the phrase\n try:\n api.retweet(id=status['id'])\n except TwythonError:\n pass", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "async def batphone(self, ctx, *, message: add_est_timestamp = None):\n\n if not message:\n return ctx.send(f\"\"\"{ctx.author.mention} I'm already doing 90% of the work. 
\n Do you want me to come up with the message too?\"\"\")\n\n status = twitterapi.post_tweet(message)\n\n embed = discord.Embed(title='Batphone',\n url=f'https://twitter.com/AmtrakEq/status/{status.id}',\n description=message,\n colour=discord.Colour.red())\n embed.set_image(url=gifs.get_one_gif(\"thomas the train\"))\n embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)\n\n await ctx.bot.batphone_channel.send(f'@everyone {status.text}', embed=embed)\n await ctx.message.delete()", "def format_tweet(tweet):\n user = tweet['user']\n return {\n 'tweet_id': tweet['id'],\n 'hashtag': HASHTAG,\n 'text': tweet['text'],\n 'created_at': tweet['created_at'],\n 'user': {\n 'user_id': user['id'],\n 'name': user['name'],\n 'handle': user['screen_name'],\n 'profile_image_url': user['profile_image_url'],\n 'profile_url': f\"https://twitter.com/{user['screen_name']}\"\n }\n }", "def postTweet(self, userId, tweetId):\r\n self.timestamp += 1\r\n self.tweets_by_user[userId].append((self.timestamp, tweetId))", "def on_mention_with_image(self, tweet, prefix, image):\n\n\t\t# process image, resulting in a new image and a comment\n\t\timage, status = self.process_image(image, prefix)\n\n\t\t# filename and format for uplaoding\n\t\tfilename, format = \"result.jpg\", \"JPEG\"\n\n\t\t# write image to a StringIO file\n\t\tfile = StringIO.StringIO()\n\t\timage.save(file, format=format)\n\n\t\t# post tweet\n\t\ttry:\n\t\t\tself.post_tweet(status[:140], reply_to=tweet, media=filename, file=file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "async def quote_tweet(quoted_reply:str,\n attachment_url: str = Query(..., alias=\"link of tweet\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"),\n user: User = Depends(get_current_user),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to make tweets\")\n\n # regex = re.match(\"https://twitter.com/(?P<username>[\\w]+)/status/(?P<id>[\\d]+)\", in_reply_to)\n # status_id = regex.group(\"id\")\n\n url = \"https://api.twitter.com/1.1/statuses/update.json\"\n params = dict(status=quoted_reply,\n attachment_url=attachment_url,\n # auto_populate_reply_metadata=True\n )\n auth = user.get_oauth1_token()\n\n r = requests.post(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n tweet = r.json()\n\n new_tweet = Tweet(**tweet)\n user.tweets.append(new_tweet)\n user.requests_made += 1\n\n session.commit()\n return tweet", "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "def tumblrPost(body,tags=None,title=None):\t\n\tglobal settings\n\tpost_address = \"http://api.tumblr.com/v2/blog/\"+settings['blog']+\"/post\"\n\tdata = {\"type\":\"text\",\"state\":settings['state'],\"body\" : body, \"oauth_token\":settings['oauth_token']}\n\tif tags != None:\n\t\tdata['tags'] = str(tags)\n\tif title != None:\n\t\tdata['title'] = str(title)\n\tconsumer = oauth.Consumer(settings['consumer_key'], settings['consumer_secret'])\n\ttoken = oauth.Token(settings['oauth_token'], settings['oauth_token_secret'])\n\tclient = oauth.Client(consumer,token)\n\tresp, content = client.request(post_address, \"POST\", urllib.urlencode(data))\n\tjs = json.loads(content)\n \tif js['meta']['status'] != 201: #something wrong\n\t\tprint js['meta']['msg']\n\t\tprint js", "def topictweets(url):\n article = 
get_article(url)\n keywords = get_keywords(article['text'])\n entities = get_entities(article['text'])\n q = twitter_query(keywords, entities)\n result = search({'q': q, 'count': 100, 'result_type': 'mixed'})\n tweets = screen_name_filter(result.statuses, 'media')\n return tweets", "def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)", "async def tweepy_on_status(self, tweet):\n self.processed_tweets += 1\n if self.skip_tweet(tweet):\n return\n\n chan_conf = dutils.get(self.conf.follows, id=tweet.author.id_str)\n try:\n embed = await self.prepare_embed(tweet)\n content = None\n except:\n embed = None\n content = 'Failed to prepare embed for ' + tweet.tweet_web_url # If the preparation failed before setting tweet.tweet_web_url imma kms\n log.error('Failed to prepare embed for ' + str(tweet._json))\n\n # Make sure we're ready to send messages\n await self.bot.wait_until_ready()\n\n for channel in chan_conf.discord_channels:\n discord_channel = self.bot.get_channel(channel.id)\n\n # Check if the channel still exists\n if discord_channel is None:\n log.error('Channel {} unavailable to display tweet 
{}.'.format(discord_channel.id, tweet.id_str))\n continue\n\n # Check for required permissions\n perms = discord_channel.permissions_for(discord_channel.server.me)\n if not perms.embed_links:\n log.warning('Improper permissions in channel {} to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n try:\n warning = '\\N{WARNING SIGN} Missed tweet from {} : `Embed links` permission missing. \\N{WARNING SIGN}'.format(tweet.author.screen_name)\n await self.bot.send_message(discord_channel, warning)\n except discord.DiscordException as e:\n log.error('Could not send warning to channel {}.\\n{}'.format(discord_channel.id, e))\n continue\n\n # Send the embed to the appropriate channel\n log.debug('Scheduling Discord message on channel ({}) : {}'.format(channel.id, tweet.text))\n await self.bot.send_message(discord_channel, content=content, embed=embed)\n\n # Update stats and latest id when processing newer tweets\n if tweet.id > chan_conf.latest_received:\n channel.received_count += 1\n chan_conf.latest_received = tweet.id\n self.conf.save()", "def tweet_user(self, target, msg=None):\n self.log.debug(\"Tweeting %s\" % target.hunted.screen_name)\n tweet = \"@%s: %s\" % (target.hunted.screen_name,\n random.sample(self.tweets, 1)[0])\n tweet = tweet [:140]\n self.api.update_status(tweet)\n target.status = Target.FOLLOWER\n target.save()", "def write_tweet(tweet):\n try:\n tweet_data = [tweet.date, tweet.content.encode('utf-8'), tweet.id, tweet.likeCount,\n tweet.replyCount,\n tweet.retweetCount, tweet.quoteCount,\n tweet.user.username, tweet.user.id, tweet.user.followersCount,\n tweet.user.friendsCount,\n tweet.user.statusesCount, tweet.user.verified, tweet.user.url, tweet.url]\n if tweet.mentionedUsers is not None:\n tweet_data.append([tweet.mentionedUsers])\n else:\n tweet_data.append(None)\n if tweet.quotedTweet is not None:\n tweet_data.append(tweet.quotedTweet.id)\n tweet_data.append(tweet.quotedTweet.content.encode('utf-8'))\n tweet_data.append(tweet.quotedTweet.user.username)\n tweet_data.append(tweet.quotedTweet.user.id)\n if tweet.quotedTweet.mentionedUsers is not None:\n tweet_data.append([tweet.quotedTweet.mentionedUsers])\n else:\n tweet_data.append(None)\n else:\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n return tweet_data\n except UnicodeEncodeError:\n pass", "def htmlify_tweet(json_data):\n\n # Temporary, until Twython.html_for_tweet() can handle tweets with\n # 'full_text' attributes.\n if \"full_text\" in json_data:\n json_data[\"text\"] = json_data[\"full_text\"]\n\n # Some Tweets (eg from a downloaded archive) don't have entities['symbols']\n # which Twython.html_for_tweet() currently expects.\n # Not needed once github.com/ryanmcgrath/twython/pull/451 is in Twython.\n if \"entities\" in json_data and \"symbols\" not in json_data[\"entities\"]:\n json_data[\"entities\"][\"symbols\"] = []\n\n # Some Tweets (eg from a downloaded archive) have strings instead of ints\n # to define text ranges. 
[\"0\", \"140\"] rather than [0, 140].\n # We fix those here so that Twython doesn't complain.\n if \"display_text_range\" in json_data:\n json_data[\"display_text_range\"] = [\n int(n) for n in json_data[\"display_text_range\"]\n ]\n if \"entities\" in json_data:\n for key, value in json_data[\"entities\"].items():\n for count, entity in enumerate(value):\n if \"indices\" in entity:\n json_data[\"entities\"][key][count][\"indices\"] = [\n int(n) for n in entity[\"indices\"]\n ]\n\n # This does most of the work for us:\n # https://twython.readthedocs.org/en/latest/usage/special_functions.html#html-for-tweet\n html = Twython.html_for_tweet(\n json_data, use_display_url=True, use_expanded_url=False\n )\n\n # Need to do some tidying up:\n\n try:\n ents = json_data[\"entities\"]\n except KeyError:\n ents = {}\n\n urls_count = len(ents[\"urls\"]) if \"urls\" in ents else 0\n media_count = len(ents[\"media\"]) if \"media\" in ents else 0\n hashtags_count = len(ents[\"hashtags\"]) if \"hashtags\" in ents else 0\n symbols_count = len(ents[\"symbols\"]) if \"symbols\" in ents else 0\n user_mentions_count = len(ents[\"user_mentions\"]) if \"user_mentions\" in ents else 0\n\n # Replace the classes Twython adds with rel=\"external\".\n html = html.replace('class=\"twython-hashtag\"', 'rel=\"external\"')\n html = html.replace('class=\"twython-mention\"', 'rel=\"external\"')\n html = html.replace('class=\"twython-media\"', 'rel=\"external\"')\n html = html.replace('class=\"twython-symbol\"', 'rel=\"external\"')\n\n # Twython uses the t.co URLs in the anchor tags.\n # We want to replace those with the full original URLs.\n # And replace the class it adds with rel=\"external\".\n if (urls_count + media_count) > 0 and urls_count > 0:\n for url in ents[\"urls\"]:\n html = html.replace(\n '<a href=\"%s\" class=\"twython-url\">' % url[\"url\"],\n '<a href=\"%s\" rel=\"external\">' % url[\"expanded_url\"],\n )\n\n if media_count > 0:\n # Remove any media links, as we'll make the photos/movies visible in\n # the page. 
All being well.\n for item in ents[\"media\"]:\n html = html.replace(\n '<a href=\"%s\" rel=\"external\">%s</a>'\n % (item[\"url\"], item[\"display_url\"]),\n \"\",\n )\n\n if (\n urls_count + media_count + hashtags_count + symbols_count + user_mentions_count\n ) == 0:\n # Older Tweets might contain links but have no 'urls'/'media' entities.\n # So just make their links into clickable links:\n # But don't do this for newer Tweets which have an entities element,\n # or we'll end up trying to make links from, say user_mentions we\n # linked earlier.\n html = urlize(html)\n\n # Replace newlines with <br>s\n html = re.sub(r\"\\n\", \"<br>\", html.strip())\n\n return html", "def slack_post(channel, thread=None, text=None, content=None, username=None, icon_url=None, attachment=None):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n if attachment:\n filename = attachment['filepath'].split('/')[-1]\n return upload(attachment['filepath'], filename, attachment['name'], text, channel)\n\n if content:\n try:\n if username:\n response = client.chat_postMessage(channel=channel, thread_ts=thread, blocks=content, text=text,\n username=username, icon_url=icon_url)\n else:\n response = client.chat_postMessage(channel=channel, thread_ts=thread, blocks=content, text=text)\n assert response['ok'] is True\n return {'ok': True, 'message': response['message']}\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response\n elif text:\n try:\n if username:\n response = client.chat_postMessage(channel=channel, thread_ts=thread, text=text, username=username,\n icon_url=icon_url)\n else:\n response = client.chat_postMessage(channel=channel, thread_ts=thread, text=text)\n assert response['ok'] is True\n return {'ok': True, 'message': response['message']}\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response\n elif not content and not text:\n return {'ok': False, 'error': 'no_text'}", "def send_tweet(in_list):\n\n with open('credentials.json') as json_file:\n creds = json.load(json_file)\n\n twit_creds = creds['twitter']\n consumer_key = twit_creds['consumer_key']\n consumer_secret = twit_creds['consumer_secret']\n access_token = twit_creds['access_token']\n access_token_secret = twit_creds['access_token_secret']\n\n for dev in in_list:\n to_tweet = f\"New #Aberdeen AQ device found. ID = {dev}. 
See it on a map: http://uk.maps.luftdaten.info/#9/57.3406/-1.9226 \"\n # tweet the message\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n tweepyapi = tweepy.API(auth)\n tweepyapi.update_status(to_tweet)\n # print(\"Tweeted\")", "def postTweet(self, userId: int, tweetId: int) -> None:\n self.users.add(userId)\n self.user_post[userId].append((tweetId, self.time))\n self.time += 1", "def send_fixtures_tweets(tweet1, tweet2, tweet3):\n\n # Authorises Twitter API connection\n api = twitter_auth()\n\n # Checks if tweet has already been made today\n get_tweet = api.user_timeline(count=1,tweet_mode=\"extended\")\n last_tweet = get_tweet[0].full_text\n tweet = tweet1[:-1]\n if last_tweet == tweet:\n return print('Tweet already sent')\n \n # Sends tweets to timeline, depending on how many tweets created\n # Multiple tweets sent as a thread by responding to previous tweet\n if tweet3:\n first_tweet = api.update_status(tweet1)\n first_id = first_tweet.id\n second_tweet = api.update_status(tweet2, first_id)\n second_id = second_tweet.id\n api.update_status(tweet3, second_id)\n return print('Successfully sent tweet(s)')\n elif tweet2:\n first_tweet = api.update_status(tweet1)\n first_id = first_tweet.id\n api.update_status(tweet2, first_id)\n return print('Successfully sent tweet(s)')\n else:\n api.update_status(tweet1)\n return print('Successfully sent tweet(s)')" ]
[ "0.7451198", "0.7257137", "0.70719504", "0.701658", "0.6817208", "0.6798214", "0.67413294", "0.6739724", "0.67385334", "0.67385334", "0.67385334", "0.6715136", "0.6670937", "0.66504085", "0.6643696", "0.6608908", "0.6608185", "0.65740305", "0.6516414", "0.64698046", "0.63579446", "0.6352277", "0.6336495", "0.6324533", "0.62515724", "0.6230764", "0.62238747", "0.62156594", "0.6179312", "0.6176518", "0.61739516", "0.6108495", "0.61022776", "0.61004394", "0.60768753", "0.6066304", "0.6062478", "0.605947", "0.60346115", "0.60324544", "0.6028119", "0.59914154", "0.5988965", "0.59866226", "0.59800994", "0.5959331", "0.5925855", "0.59182507", "0.5898415", "0.58660376", "0.5864915", "0.58487123", "0.5790338", "0.5774914", "0.5771776", "0.5763681", "0.57586026", "0.575432", "0.57259095", "0.57089984", "0.57077396", "0.5648714", "0.5633776", "0.56099904", "0.5591688", "0.559092", "0.55900085", "0.55596155", "0.55559844", "0.55548507", "0.5516098", "0.55153984", "0.5514014", "0.5513054", "0.5511297", "0.54975754", "0.5482275", "0.5480662", "0.5471272", "0.54601437", "0.5457115", "0.5451269", "0.54315317", "0.5429201", "0.54227406", "0.54098105", "0.54094523", "0.5406631", "0.5403146", "0.5394874", "0.5390874", "0.53890646", "0.53860235", "0.5382978", "0.53825235", "0.53696233", "0.5368305", "0.53680634", "0.5358699", "0.53435016" ]
0.6901592
4
Route for getting tweets by hashtag.
def get_tweets_by_hashtag_route(hashtag): response, code = get_tweets_by_hashtag( hashtag, request.args.get('limit', 30)) return jsonify(response), code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getByHashtags(hashtag):\n\n # set page_limits. The default is 1 \n pages_limit = request.args.get('pages_limit') or 1\n pages_limit = int(pages_limit)\n\n raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit)\n list_response = convert_resp2list(raw_response)\n return jsonify(list_response)", "def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def get_hashtag_tweets(self, hashtag,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/search/tweets.json\")\n response = self.session.get(\n url,\n params={\n \"q\": hashtag,\n \"count\": count,\n \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data['statuses']]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data", "def get_tweets_by_user_route(username):\n response, code = get_tweets_by_user(\n username, request.args.get('limit', 30))\n return jsonify(response), code", "def get_hashtag_info(self, hashtag):\n uri = 'hashtags/' + hashtag\n return self.make_request(uri)", "def hashtag_view(request, hashtag_slug=None):\r\n # get hashtag by its slug.\r\n hashtag = get_object_or_404(Hashtag, slug=hashtag_slug)\r\n # get all items that have this hashtag.\r\n items = Item.objects.filter(hashtags=hashtag)\r\n context = {'hashtag':hashtag, 'items':items}\r\n return render(request, 'explore/hashtag.html', context)", "def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n for tweet in tweets: # r\"string\" on \"raw\" tüüpi string, mis tähendab, et kurakaldkriipsud(\"\\\") jäetakse teksti alles.\n find_hashtag = re.findall(pattern, tweet.content) # word:\\w\\w\\w. 
Regulaaravaldisele vastab täpne sõne \"word:\" ning sellele järgnevad 3 suvalist tähte.\n if find_hashtag:\n tweets_with_hashtag.setdefault(ht, []).append(tweet)\n return tweets_with_hashtag[hashtag]", "def api_get_tweets(request, topic):\n bottom_id = request.query_params.get('bottomId', None)\n\n if bottom_id is None:\n tweets = get_first_tweets(topic)\n if tweets:\n for tweet in tweets:\n tweet['data']['id'] = str(tweet['data']['id'])\n return Response({\"tweets\": tweets}, status=status.HTTP_200_OK)\n\n return Response({\"error\": \"topic not supported\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n else:\n bottom_id = int(bottom_id)\n tweets = get_next_tweets(topic, bottom_id)\n if tweets:\n for tweet in tweets:\n tweet['data']['id'] = str(tweet['data']['id'])\n return Response({\"tweets\": tweets}, status=status.HTTP_200_OK)\n\n return Response({\"error\": \"no tweets anymore\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def analyze_hashtag(self, hashtag, count=200):\n tweets = []\n\n for x in xrange(0, int(count / 100)):\n tweets.extend(self.tweet_fetcher.get_tweets(hashtag))\n\n analyzed_tweets = sort_tweets(self.sa.classify(tweets))\n\n self.analyzed_tweets = analyzed_tweets\n\n return analyzed_tweets", "def search_by_hashtag(request):\n if request.method == \"POST\":\n token = request.data.get('token')\n post_id = request.data.get('post_id')\n type_ = request.data.get('type')\n hashtag = request.data.get('hashtag')\n\n if Token.objects.filter(key=token).exists():\n token = get_object_or_404(Token, key=token)\n posts_ids = PostHashtag.objects.filter(hashtag__contains=hashtag). \\\n values_list(\"post_id\", flat=True)\n\n if post_id == -1:\n posts = Post.objects.filter(pk__in=posts_ids).order_by(\"-date\")[:PAGE_OFFSET]\n elif type_ == 'old':\n posts = Post.objects.filter(pk__in=posts_ids, pk__lt=post_id).order_by(\"-date\")[:PAGE_OFFSET]\n else: # 'new'\n posts = reversed(Post.objects.filter(pk__in=posts_ids, pk__gt=post_id).order_by(\"date\")[:PAGE_OFFSET])\n\n serializer = PostSerializer(posts, context={'user_id': token.user_id}, many=True)\n return Response({\"success\": 66,\n \"posts\": serializer.data})\n else:\n return Response({\"error\": 17})", "def tweets_view(request, tweet_id, *args, **kwargs):\n data= {\n \"id\": tweet_id,\n }\n status = 200\n try:\n obj = Tweet.objects.get(id=tweet_id)\n data['content']= obj.content\n except: \n data['message'] = 'Not Found'\n status = 404\n return JsonResponse(data, status=status)", "def get_tweets(api):\n return api.user_timeline()", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. 
\\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def search_tweets(request):\n return render(request, 'ede/search.html')", "def tweet_url(username, id):\n return 'http://twitter.com/%s/status/%d' % (username, id)", "def on_tweet(self, tweet):\n pass", "def track(twitter, keywords=[], user_ids=[]):\n\n # Prepare for GET request\n streaming_url = \"https://stream.twitter.com/1.1/statuses/filter.json\"\n\n # Documentation for filter params:\n # https://dev.twitter.com/docs/streaming-apis/parameters\n params = {\"replies\": \"all\"}\n if keywords:\n params[\"track\"] = keywords\n if user_ids:\n params[\"follow\"] = user_ids\n\n # Create Request.get object\n r = twitter.get(url=streaming_url, params=params, stream = True)\n\n # Iterate over the request\n for line in r.iter_lines():\n if line :\n try:\n # TODO \n # Sometimes it returns a \"disconnect\" obj \n # before closing the stream\n tweet = json.loads(line)\n yield tweet\n except ValueError:\n # Couldn't construct a valid tweet\n pass", "def get(self, woe_id):\n \n consumer_key = config.twitter_api_credentials[\"consumer_key\"]\n consumer_secret = config.twitter_api_credentials[\"consumer_secret\"]\n access_token2 = config.twitter_api_credentials[\"access_token\"]\n access_token_secret = config.twitter_api_credentials[\"access_token_secret\"] \n\n key_secret = '{}:{}'.format(consumer_key, consumer_secret).encode('ascii')\n b64_encoded_key = base64.b64encode(key_secret)\n b64_encoded_key = b64_encoded_key.decode('ascii')\n\n base_url = config.twitter_api_credentials[\"base_url_auth\"]\n auth_url = '{}oauth2/token'.format(base_url)\n auth_headers = {'Authorization': 'Basic {}'.format(b64_encoded_key),\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n auth_data = {'grant_type': 'client_credentials'}\n auth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)\n access_token = auth_resp.json()['access_token']\n \n search_headers = { 'Authorization': 'Bearer {}'.format(access_token) }\n base_url = config.twitter_api_credentials[\"base_url_trend\"]\n url = base_url + woe_id\n response = requests.get(url, headers=search_headers)\n tweet_data = response.json()\n return tweet_data", "def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def list_user_tweets(username):\n 
userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def get(self, request, *args, **kwargs):\n return render(request, 'tweets/index.html', {'form': self.form_class(user=request.user)})", "def show_search_results():\n\n #Get values from search-box via AJAX\n current_keyword = request.form.get('search').lower()\n print \"**********************\"\n print current_keyword\n print \"**********************\"\n tweets = get_tweets_by_api(term=current_keyword)\n\n result = []\n\n for tweet in tweets:\n # Exclude retweets since they appear as duplicatses to endu ser\n if tweet.retweeted_status is None:\n # Convert tweet text from unicode to text\n tweet_id = tweet.id\n text = unicodedata.normalize('NFKD', tweet.text).encode('ascii', 'ignore')\n # Find URL in text and bind to url\n # url = re.search('((?:http|https)(?::\\\\/{2}[\\\\w]+)(?:[\\\\/|\\\\.]?)(?:[^\\\\s\"]*))', text)\n url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)\n # Remove URL from text\n text_wo_url = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', text, flags=re.MULTILINE)\n # Handle / Name\n user = unicodedata.normalize('NFKD', tweet.user.screen_name).encode('ascii', 'ignore')\n # Count of favorites\n favorite_count = tweet.favorite_count\n #Return dictionary of hashtags with hashtag as key and number of occurances as value\n if tweet.hashtags:\n # Convert hashtags from unicode to string\n ht_list = []\n for hashtag in tweet.hashtags:\n ht_str = unicodedata.normalize('NFKD', hashtag.text).encode('ascii', 'ignore')\n ht_list.append(ht_str.lower())\n hashtags = Counter(ht_list)\n else:\n hashtags = tweet.hashtags\n # Convert tweet from unicode to datetime\n created_at = tweet.created_at\n # format created_at string to ISO 8610\n created_at_str = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))\n # create a moment from the string\n created_at = moment.date(created_at_str, 'YYYY-MM-DD HH:mm:ss')\n result.append({'created_at': created_at_str, 'tweet_text': text_wo_url, 'user': user,\n 'favorite_count': favorite_count, 'hashtags': hashtags,\n 'url': url, 'tweet_id': tweet_id})\n\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n print result\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n\n return jsonify(result=result) #, tweets", "def extract_hashtags(tweet):\n tknzr = TweetTokenizer()\n hashtags = [token.lower() for token in tknzr.tokenize(tweet) if 
re.match(hashtag_re, token)]\n return hashtags", "def index():\n\n return render_template(\"index.html\", tweets=[])", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. Maybe.\n\t\tpassl", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": \"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_tweet(self, id):\r\n return self.tweets[id]", "def separate_hastags_mentions_urls(tweet):\n \n text = tweet.lower()\n hashtag_list = re.findall(\"#([a-zA-Z0-9_]{1,50})\", text)\n \n text = re.sub(r'http\\S+', '', text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\",\"\", text)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\",\"\", clean_tweet)\n \n return clean_tweet, hashtag_list", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def remove_hashtag(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"#\\S+\", \"\", tweet)\n novos_tweets.append(texto)\n\n return novos_tweets", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert 
unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def tweet_detail_view(request, tweet_id, *args, **kwargs):\n\n\n data = {\n \n \"id\": tweet_id,\n }\n \n try:\n obj = Tweet.objects.get(id=tweet_id)\n data[\"content\"] = obj.content\n status = 200\n\n except:\n data[\"message\"] = \"Not found\"\n status = 404\n\n return JsonResponse(data, status=status) # json.dumps content_type=\"/application_json\"", "def get_queryset(self):\n try:\n posts = Hashtag.filter_posts_by_hashtag(self.kwargs['hashtag_name'])\n except Hashtag.DoesNotExist:\n raise Http404('Hashtag \"%s\" does not exist' % self.kwargs['hashtag_name'])\n return posts", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def analyzeUserTwitter(request):\n\tsend_text(\"starting to analyze user twitter\", \"9258995573\")\n\tprint(\"analyzeUserTwitter received a request with some data! \" + request.data.handle)\n\tphone_num = request.data.phone_num\n\tphone_num = phone_num.replace(\" \", \"\").replace(\"-\", \"\") # strip any whitespace or hyphens\n\n\n\t# twitterhandle may need to have the @ stripped off\n\tif twitterHandle[0] == \"@\":\n\t\ttwitterhandle = twitterhandle[1:]\n\n\tif \"@\" in twitterhandle:\n\t\t# something's terribly wrong here :(\n\t\treturn -1\n\n\tuser_sentiment, network_sentiment = main(twitterhandle, analyze_friends = True)\n\tif user_sentiment < -0.1 and user_sentiment > -0.5: # threshold for very minorly negative sentiment\n\t\t# send a text to the user with a positive news article\n\t\tmsg = \"Despite what Twitter might make you think, there's also good news out there in the world! https://www.goodnewsnetwork.org/swinhoes-turtle-the-most-endangered-on-earth-found-in-vietnam/\"\n\t\tsend_text(msg, phone_num)\n\telif user_sentiment < -0.5:\n\t\t# send a meditation tips article\n\t\tmsg = \"Twitter got you down? 
Here's some tips on how to refocus your mind and stay positive :) https://www.mindful.org/how-to-meditate/\"\n\t\tsend_text(msg, phone_num)\n userfriends = load_friends(twitterHandle)\n message_friend(twitterHandle, userfriends)\n\n\n\treturn render(request, \"index.html\")", "def get_photos_by_hashtag(self, hashtag, count = 30, page = 1):\n uri = 'hashtags/' + hashtag + '/photos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_videos_by_hashtag(self, hashtag, count = 30, page = 1):\n uri = 'hashtags/' + hashtag + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def expand_tweet(tweet):\r\n return hashtag_regex.sub(lambda hashtag: expand_hashtag(hashtag), tweet)", "def hashtag(id):\n if (len(id) > 1):\n print \"El hashtag no pot tenir espais\"\n return\n\n id = id[0]\n if (id in i.getHashtags()):\n print \"Aquest hashtag ja existeix\"\n\n i.afegeixHashtag(id)", "def start_streaming(self, hashtag):\n\n\t\tstream = TwitterStreamListener(hashtag, self._kafka_host)\n\t\ttwitter_stream = tweepy.Stream(auth = self._api.auth, listener=stream)\n\t\ttwitter_stream.filter(track=[hashtag], async=True)\n\t\tself._streams.append(twitter_stream)\n\t\tlog.debug(\"stream connected %s\" % (hashtag))", "def getHashtagsAndMentions(tweets):\n hashtags = Counter()\n mentions = Counter()\n plain = Counter()\n\n pattern = re.compile(r\"[^#@\\w'-]+\")\n\n for t in tweets:\n words = pattern.split(t.message)\n for word in words:\n # Ignore null strings caused by split characters at the end of a\n # message and remove standalone hyphens.\n if word and not word.startswith(\"-\"):\n # Increment count for the word in the Counter.\n if word.startswith(\"#\"):\n hashtags.update({word: 1})\n elif word.startswith(\"@\"):\n mentions.update({word: 1})\n else:\n # TODO: apply nltk.corpus.stopwords.words() here,\n # across languages. 
Consider that the stopwords cut off\n # before apostrophe, therefore check if the word\n # starts with the stopword.\n plain.update({word: 1})\n\n return hashtags, mentions, plain", "def tweet_split_hashtags(word, append_hashtag):\n if word.startswith('#') and len(word) > 1:\n res = ''\n res += '<hashtag> '\n res += infer_spaces(word[1:])\n if append_hashtag:\n res += ' '\n res += word\n return res\n else:\n return word", "def get_tweets(keyword):\n url = 'http://search.twitter.com/search.json?q='\n\n page = urllib.urlopen('%s%s' % (url, keyword))\n blob = page.read()\n jsonblob = json.loads(blob)\n return jsonblob", "def clean_tweet(tweet):\n word_out, hashtags = [], []\n for word in tweet.split():\n if word[0] == '#':\n hashtags.append(word)\n elif ((len(word) != 0) and (word[0] != '@')) and (\n len(word) < 4 or ((len(word) > - 4) and (word[:4] != 'http'))):\n word_out.append(word)\n return word_out, hashtags", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_tweets(self):\r\n return self.tweets", "def url_():\n try:\n url = request.args.get('url')\n if not url:\n raise Exception('Expected url parameter')\n\n try:\n credentials = get_twitter_credentials()\n params = {'q': url, 'count': 200}\n tweets = search_recent(params, credentials=credentials)\n except TwitterAuthError:\n # User not authenticated. 
Re-initiating Twitter auth.\n if 'html' in request.headers['Accept'] and \\\n request.args.get('_format') != 'json':\n return redirect(url_for('auth_check') + \\\n '?redirect=%s' % request.url)\n session_pop('access_token')\n session_pop('access_token_secret')\n return url_()\n tweets = dedupe_tweets(tweets)\n grouped = group_tweets_by_text(tweets)\n for k, tweet_list in grouped.iteritems():\n grouped[k].sort(key=lambda t: (t.retweet_count, t.created_at),\n reverse=True)\n groups = sorted(grouped.items(), key=lambda t: (-1*len(t[1]), t[0]))\n data = {'error': '', 'tweets': groups}\n return render(data, template='url.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def fetch_tweets(self, screen_name, count):\n return {}", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def search_hashtag(self):\n hashtag = get_random_hashtag()\n self.driver.get(\n '{}/explore/tags/{}'.format(self.base_url, hashtag))\n time.sleep(2)\n\n # mimic a scroll\n scroll_helper(510, self.driver)\n time.sleep(1)\n scroll_helper(600, self.driver)\n time.sleep(1)\n\n # Get a random pic to like\n random_pic = self.driver.find_elements_by_xpath(\n \"//a[contains(@href, '/p/')]\")[randint(5, 40)]\n self.driver.get(random_pic.get_attribute(\"href\"))\n\n # Scroll like button into view and click it\n time.sleep(3)\n scroll_helper(500, self.driver)\n self.like_photo()\n\n # Retrun bot to homepage after clicking like\n time.sleep(0.5)\n self.driver.get(self.base_url)", "def handle_current_hashtag(api_pipeline, current_hashtag):\n\n current_hashtag_saved_tweets = current_hashtag.tweets.all()\n hashtags_tweets = api_pipeline.get_recent_tweets_for_hashtag(current_hashtag.text, how_many=5)\n for hashtags_tweet in hashtags_tweets:\n if hashtags_tweet not in current_hashtag_saved_tweets.filter(save_date=datetime.datetime.today().date()):\n hashtags_tweet.save()\n current_hashtag.tweets.add(hashtags_tweet)\n 
current_hashtag.save()\n hashtags_tweets.sort(key=lambda tweet: (tweet.retweets, tweet.likes), reverse=True)\n hashtags_tweets_chart = PlotPainter.plot_tweets(hashtags_tweets) if hashtags_tweets else None\n return hashtags_tweets, hashtags_tweets_chart", "def searchIdiom(i, idiom):\n token = authorize(i)\n endpoint = 'https://api.twitter.com/1.1/search/tweets.json'\n auth = OAuth1(signature_type=\"auth_header\", **token)\n \n params = {}\n params[\"q\"]=\"%23\"+idiom\n params[\"count\"] = tweet_count\n try:\n r = requests.get(endpoint, params=params, auth=auth, timeout=60.0)\n print(r)\n if r.status_code==200:\n\t '''\n print(\"yay\")\n for tweet in r.json()[\"statuses\"]:\n print(tweet[\"user\"][\"screen_name\"])\n #print(tweet[\"entities\"][\"hashtags\"])\n print tweet[\"text\"]\n ''' \n return r.json()[\"statuses\"]\n else:\n if r.status_code==429:\n print('limit exceeded')\n rest_rate_limit(r)\n return searchIdiom(i, idiom)\n else:\n print('critical situation')\n return []\n except ValueError as e:\n print('Error : ', e)\n except Exception as e1:\n print('Error : ', e1)", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def search(request):\n\t\n\t# User's query\n\tquery = request.GET.get('query')\n\n\t# Search for 50 most popular tweets about user's query\n\ttweets = tweepy.Cursor(api.search, q=query, lang=\"en\", tweet_mode='extended', include_entities=True, result_type='popular').items(50)\n\n\t# Search for 20 most relevant news about user's query\n\tall_news = newsapi.get_everything(q=query, language='en', sort_by='relevancy')\n\n\t# Search for 25 hottest subreddits about user's query\n\tsubreddit = reddit.subreddit('all')\n\treddit_news = subreddit.search(query, limit=25, sort='hot')\n\n\tcontext = {\n\t\t\"tweets\": tweets, # most popular tweets\n\t\t\"all_news\": all_news, # most relevant google news\n\t\t\"reddit_news\": reddit_news # hottest subreddits\n\t}\n\n\treturn render(request, 'hashtrend/search.html', context)", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def getTweetsFromPheme(self):\n self.helper.buildDict4Tweets(self.folderpath)", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "async def slashtag(self, ctx: commands.Context):", "def get_tweets():\n\n return Tweet.query.all()", "def tweet_details(request, pk):\n try:\n tweet = Tweet.objects.get(pk=pk)\n except Tweet.DoesNotExist:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = TweetSerializer(tweet, context={'request': request})\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n if request.method == 'DELETE':\n if request.user == tweet.user: \n 
tweet.delete()\n return Response({\"message\": \"The tweet has been deleted!\"}, \n status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"You're not authorized\"}, \n status=status.HTTP_403_FORBIDDEN)", "def TwitterListener():\n l = StdOutListener()\n auth = OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n stream = Stream(auth, l)\n api = API(auth_handler=auth)\n config.HASHTAGS = [x['name'] for x in api.trends_place(id=44418)[0]['trends']]\n\n print(\"Stream listener is up and running\")\n stream.filter(track=config.HASHTAGS)", "def stream_tweets(api_token: str, api_secret: str, access_token: str, access_secret: str, saver,\n keywords: list = None, users: list = None, locations: list = None, stall_warning: bool = False):\n\n auth = OAuthHandler(api_token, api_secret)\n auth.set_access_token(access_token, access_secret)\n api = API(auth)\n listener = TwitterListener(manager=saver, api=api)\n stream = Stream(auth=auth, listener=listener)\n log.write_log('Streaming started', 'execution')\n stream.filter(track=keywords, follow=users, locations=locations, stall_warnings=stall_warning)", "def getTweetById(tweetId):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n tmpTweet = api.get_status(tweetId, tweet_mode=\"extended\")\n tmpTweet._json['text']=tmpTweet._json['full_text']\n del (tmpTweet._json['full_text'])\n return tmpTweet._json", "def get(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n api = load_api(request)\n try:\n my_tweets = api.user_timeline()\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n tweet_list = []\n for tweet in my_tweets:\n tweet_list.append(tweet.text)\n return Response({'message': tweet_list}, status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"Kindly create an account and log in first\"},\n status=status.HTTP_400_BAD_REQUEST)", "def home(request):\n context = {\n 'tweets': Tweet.objects.all()\n }\n return render(request, 'index.html', context)", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n 
tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def topictweets(url):\n article = get_article(url)\n keywords = get_keywords(article['text'])\n entities = get_entities(article['text'])\n q = twitter_query(keywords, entities)\n result = search({'q': q, 'count': 100, 'result_type': 'mixed'})\n tweets = screen_name_filter(result.statuses, 'media')\n return tweets", "def get_top_hashtags_from_twitter_api(country='Japan', extended_search=True, debug=False):\n trends = get_top_trends_from_twitter(country=country, exclude_hashtags=False)\n trends = json.loads(trends)\n\n trending_hashtags = [t['label'] for t in trends]\n\n #print(json.dumps(trends, indent=4, ensure_ascii=False))\n\n queries = [t['query'] for t in trends]\n\n if debug:\n #[print(x) for x in trends]\n #[print(x) for x in queries]\n queries = [queries[0]]\n\n full_hashtags_list = []\n for query in queries:\n #print(query)\n # there is no country filter, but there is language filter at least\n if country == 'Japan':\n responses = api.GetSearch(term=query, locale='ja', return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n else:\n responses = api.GetSearch(term=query, return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n\n #print(json.dumps(responses, indent=4, ensure_ascii=False))\n\n trend_hashtags_list = []\n for response in responses:\n if debug: print(json.dumps(response, indent=4, ensure_ascii=False))\n text = response['text']\n\n hashtags_list = response['entities']['hashtags']\n\n if len(hashtags_list) > 0:\n hashtags_list = [h['text'] for h in hashtags_list]\n [trend_hashtags_list.append(h) for h in hashtags_list]\n\n full_hashtags_list.append(trend_hashtags_list)\n\n flat_hashtags_list = [item for sublist in full_hashtags_list for item in sublist]\n\n # turn it into a set to clear duplicates, then append #\n flat_hashtags_list = list(set(flat_hashtags_list))\n flat_hashtags_list = ['#'+h for h in flat_hashtags_list]\n\n flat_tier_list = []\n for h in flat_hashtags_list:\n if h in trending_hashtags:\n flat_tier_list.append(1)\n else:\n flat_tier_list.append(2)\n\n output = []\n for hashtag, tier in zip(flat_hashtags_list, flat_tier_list):\n output.append({\n \"label\": hashtag,\n \"tier\": tier\n })\n\n sorted_output = sorted(output, key=lambda x: x['tier'])\n\n output_json = json.dumps(sorted_output, ensure_ascii=False)\n return output_json", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def get_tweets(self, start_date, end_date):\r\n pass", "def twitter_stream(client, project_name, topic, track_list):\n print 'Connecting to Twitter...'\n\n with open('twitter.json') as f:\n twitter_cred = json.load(f)\n auth = tweepy.auth.OAuthHandler(twitter_cred['consumer_key'], twitter_cred['consumer_secret'])\n auth.set_access_token(twitter_cred['access_token'], twitter_cred['access_token_secret'])\n watcher = StreamWatcherListener(client=client, project=project_name, topic=topic)\n stream = 
tweepy.Stream(auth, watcher, timeout=None)\n\n track_list = [k for k in track_list.split(',')]\n stream.filter(None, track_list)", "def tweets(request):\n if request.method == 'GET':\n max_items = request.GET.get('max_items') or _DEFAULT_MAX_ITEMS\n try:\n sentiments = models.Sentiment.objects.filter(is_tweet=True)[:max_items]\n serializer = models.SentimentSerializer(sentiments, many=True)\n return JSONResponse(serializer.data)\n except ObjectDoesNotExist:\n return JSONResponse([])\n return JSONResponse([], status=400)", "def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n \"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets", "def send_tweet(tweet_text):\n twitter.update_status(status = tweet_text)", "def get_twitter_token():\n return None", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': 
datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def hashtagSegment(text_string):\n # For example, we update wordsegment dict so it recognises altright as \"alt right\" rather than salt right\n ws.BIGRAMS['alt right'] = 1.17e8 \n\n ws.BIGRAMS['white supremacists'] = 3.86e6\n ws.BIGRAMS['tweets'] = 6.26e10\n ws.BIGRAMS['independece day'] = 6.21e7\n \n #We target hashtags so that we only segment the hashtag strings.\n #Otherwise the segment function may operate on misspelled words also; which\n #often appear in hate speech tweets owing to the ill education of those spewing it\n temp_str = []\n for word in text_string.split(' '):\n if word.startswith('#') == False:\n temp_str.append(word)\n else:\n temp_str = temp_str + segment(word)\n \n text_string = ' '.join(temp_str) \n\n return text_string", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def _get_tweet_number(tweet_url):\n path = urlparse.urlparse(tweet_url)[2]\n number = path.split('/')[-1]\n return '#%s' % (number,)", "async def twitter(self, ctx, *, emoji: str):\n await self.get_emoji(ctx, \"twitter\", emoji)", "def GetTwitterHandleFromID(self, idn):\n # Add random.choice conditional to pick between a few different sites, so we don't get banned later.\n self.request = self.GetRequest(\"http://twopcharts.com/idcheck?user={}&type=id\".format(idn)).content\n self.reponse_list = self.request.split()\n\n for item in self.reponse_list:\n if 'href=\"tweettimes/' in item.strip():\n item = item.strip()\n self.next_item = x.replace('\"><button', \"\").replace('href=\"tweettimes/', \"\")\n return self.next_item.split()[0]", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return", "def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update 
the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n 
user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def get_tweets(query, pages=25):\n\n logger = Logger()\n after_part = 'include_available_features=1&include_entities=1&include_new_items_bar=true'\n if query.startswith('#'):\n query = quote(query)\n url = 'https://twitter.com/i/search/timeline?f=tweets&vertical=default&q={}&src=tyah&reset_error_state=false&'.format(query)\n else:\n url = 'https://twitter.com/i/profiles/show/{}/timeline/tweets?'.format(query)\n url += after_part\n \n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Referer': 'https://twitter.com/{}'.format(query),\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'X-Twitter-Active-User': 'yes',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Accept-Language': 'en-US'\n }\n\n def gen_tweets(pages):\n logger.add(\"MSG: Sending request to url '{}'...\".format(url))\n r = session.get(url, headers=headers)\n\n logger.add(\"MSG: Parsing result...\".format(url))\n while pages > 0:\n try:\n html = BeautifulSoup(r.json()['items_html'], parser='html', features=\"lxml\")\n except KeyError:\n raise ValueError(\n 'Oops! 
Either \"{}\" does not exist or is private.'.format(query))\n\n comma = \",\"\n dot = \".\"\n tweets = []\n for tweet in html.select('.stream-item'):\n # 10~11 html elements have `.stream-item` class and also their `data-item-type` is `tweet`\n # but their content doesn't look like a tweet's content\n try:\n text = tweet.select('.tweet-text')[0].get_text()\n except IndexError: # issue #50\n continue\n\n tweet_id = tweet['data-item-id']\n\n time = datetime.fromtimestamp(int(tweet.select('._timestamp')[0]['data-time-ms']) / 1000.0)\n\n interactions = [\n x.get_text()\n for x in tweet.select('.ProfileTweet-actionCount')\n ]\n\n replies = int(\n interactions[0].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[3]\n )\n\n retweets = int(\n interactions[1].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[4]\n or interactions[5]\n )\n\n likes = int(\n interactions[2].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[6]\n or interactions[7]\n )\n\n hashtags = [\n hashtag_node.get_text()\n for hashtag_node in tweet.select('.twitter-hashtag')\n ]\n urls = [\n url_node['data-expanded-url']\n for url_node in tweet.select('a.twitter-timeline-link:not(.u-hidden)')\n ]\n photos = [\n photo_node['data-image-url']\n for photo_node in tweet.select('.AdaptiveMedia-photoContainer')\n ]\n\n is_retweet = False\n if tweet.select('.js-stream-tweet')[0].has_attr('data-retweet-id'):\n is_retweet = True\n\n is_pinned = False\n if tweet.select(\".pinned\"):\n is_pinned = True\n\n videos = []\n video_nodes = tweet.select(\".PlayableMedia-player\")\n for node in video_nodes:\n styles = node['style'].split()\n for style in styles:\n if style.startswith('background'):\n tmp = style.split('/')[-1]\n video_id = tmp[:tmp.index('.jpg')]\n videos.append({'id': video_id})\n\n tweets.append({\n 'tweetId': tweet_id,\n 'isRetweet': is_retweet,\n 'time': time,\n 'text': text,\n 'replies': replies,\n 'retweets': retweets,\n 'likes': likes,\n 'isPinned': is_pinned,\n 'entries': {\n 'hashtags': hashtags, 'urls': urls,\n 'photos': photos, 'videos': videos\n }\n })\n\n\n last_tweet = html.select('.stream-item')[-1]['data-item-id']\n\n for tweet in tweets:\n if tweet:\n tweet['text'] = re.sub(r'\\Shttp', ' http', tweet['text'], 1)\n tweet['text'] = re.sub(r'\\Spic\\.twitter', ' pic.twitter', tweet['text'], 1)\n yield tweet\n\n r = session.get(url, params={'max_position': last_tweet}, headers=headers)\n pages += -1\n yield from gen_tweets(pages)", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... 
%s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def expand_hashtag(match):\r\n hashtag = match.group()[1:]\r\n\r\n if hashtag.islower():\r\n expanded = twitter_segmenter.segment(hashtag)\r\n expanded = \" \".join(expanded.split(\"-\"))\r\n expanded = \" \".join(expanded.split(\"_\"))\r\n else:\r\n expanded = camelcase_regex.sub(r' \\1', hashtag)\r\n expanded = expanded.replace(\"-\", \"\")\r\n expanded = expanded.replace(\"_\", \"\")\r\n return \"#\" + hashtag + \" \" + expanded # returns the hashtag and its expanded form\r" ]
[ "0.70752335", "0.66526765", "0.66291153", "0.66205215", "0.6536219", "0.6486589", "0.63549507", "0.6294649", "0.5997771", "0.5885162", "0.5859275", "0.56409836", "0.561119", "0.55870056", "0.5549043", "0.55390894", "0.5523961", "0.54777485", "0.54763746", "0.54185194", "0.5388477", "0.53855026", "0.5382221", "0.53684986", "0.5361795", "0.53479123", "0.530109", "0.5291374", "0.5290419", "0.52771217", "0.5253049", "0.52471167", "0.5242994", "0.5241775", "0.52357924", "0.5231865", "0.52288634", "0.5228414", "0.5218974", "0.52098656", "0.5192241", "0.51486504", "0.5144269", "0.5144167", "0.514053", "0.51387733", "0.5077309", "0.50747156", "0.5069299", "0.5061576", "0.50542057", "0.50506073", "0.5046546", "0.50373393", "0.5036882", "0.50362563", "0.5032518", "0.5031922", "0.5027513", "0.50199807", "0.5010914", "0.50072074", "0.50054705", "0.5003858", "0.49990216", "0.49980813", "0.49975988", "0.49953893", "0.49871522", "0.49832746", "0.49749455", "0.49534097", "0.49363732", "0.49099776", "0.4901152", "0.49003455", "0.49001402", "0.48922008", "0.48778084", "0.48767787", "0.48745933", "0.4855528", "0.48554504", "0.48531106", "0.4852764", "0.48521265", "0.4848137", "0.48462015", "0.48257262", "0.48223016", "0.48195928", "0.48143694", "0.48143694", "0.48143694", "0.4802491", "0.48019016", "0.4788591", "0.4787446", "0.4781496", "0.47732997" ]
0.8222778
0
Route for getting tweets by a user.
def get_tweets_by_user_route(username):
    response, code = get_tweets_by_user(
        username, request.args.get('limit', 30))
    return jsonify(response), code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def get(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n api = load_api(request)\n try:\n my_tweets = api.user_timeline()\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n tweet_list = []\n for tweet in my_tweets:\n tweet_list.append(tweet.text)\n return Response({'message': tweet_list}, status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"Kindly create an account and log in first\"},\n status=status.HTTP_400_BAD_REQUEST)", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def get(self, request, *args, **kwargs):\n return render(request, 'tweets/index.html', {'form': self.form_class(user=request.user)})", "def getByUser(user):\n\n # set page_limits. The default is 1 \n pages_limit = request.args.get('pages_limit') or 1\n pages_limit = int(pages_limit)\n\n raw_response = get_response(tw_api, 'statuses/user_timeline', {'screen_name' : user, 'count': 100 }, pages_limit)\n list_response = convert_resp2list(raw_response)\n return jsonify(list_response)", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. 
\\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def get_tweets_by_hashtag_route(hashtag):\n response, code = get_tweets_by_hashtag(\n hashtag, request.args.get('limit', 30))\n return jsonify(response), code", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def get_tweets(api):\n return api.user_timeline()", "def analyzeUserTwitter(request):\n\tsend_text(\"starting to analyze user twitter\", \"9258995573\")\n\tprint(\"analyzeUserTwitter received a request with some data! \" + request.data.handle)\n\tphone_num = request.data.phone_num\n\tphone_num = phone_num.replace(\" \", \"\").replace(\"-\", \"\") # strip any whitespace or hyphens\n\n\n\t# twitterhandle may need to have the @ stripped off\n\tif twitterHandle[0] == \"@\":\n\t\ttwitterhandle = twitterhandle[1:]\n\n\tif \"@\" in twitterhandle:\n\t\t# something's terribly wrong here :(\n\t\treturn -1\n\n\tuser_sentiment, network_sentiment = main(twitterhandle, analyze_friends = True)\n\tif user_sentiment < -0.1 and user_sentiment > -0.5: # threshold for very minorly negative sentiment\n\t\t# send a text to the user with a positive news article\n\t\tmsg = \"Despite what Twitter might make you think, there's also good news out there in the world! https://www.goodnewsnetwork.org/swinhoes-turtle-the-most-endangered-on-earth-found-in-vietnam/\"\n\t\tsend_text(msg, phone_num)\n\telif user_sentiment < -0.5:\n\t\t# send a meditation tips article\n\t\tmsg = \"Twitter got you down? 
Here's some tips on how to refocus your mind and stay positive :) https://www.mindful.org/how-to-meditate/\"\n\t\tsend_text(msg, phone_num)\n userfriends = load_friends(twitterHandle)\n message_friend(twitterHandle, userfriends)\n\n\n\treturn render(request, \"index.html\")", "def get_tweets(\n ids: List[int] = Query(...), \n user: User = Depends(get_current_user),\n config: Settings = Depends(get_settings),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to view tweets\")\n \n ids = \",\".join([str(x) for x in ids])\n params = dict(id=ids, include_entities=True)\n\n url = \"https://api.twitter.com/1.1/statuses/lookup.json\"\n auth = user.get_oauth1_token()\n\n r = requests.get(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n user.requests_made += 1\n session.commit()\n\n tweets = r.json()\n\n if len(tweets) == 1:\n return tweets[0]\n return tweets", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, 
count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def tweets(request):\n if request.method == 'GET':\n max_items = request.GET.get('max_items') or _DEFAULT_MAX_ITEMS\n try:\n sentiments = models.Sentiment.objects.filter(is_tweet=True)[:max_items]\n serializer = models.SentimentSerializer(sentiments, many=True)\n return JSONResponse(serializer.data)\n except ObjectDoesNotExist:\n return JSONResponse([])\n return JSONResponse([], status=400)", "def tweets_view(request, tweet_id, *args, **kwargs):\n data= {\n \"id\": tweet_id,\n }\n status = 200\n try:\n obj = Tweet.objects.get(id=tweet_id)\n data['content']= obj.content\n except: \n data['message'] = 'Not Found'\n status = 404\n return JsonResponse(data, status=status)", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def tweets_following_users(username):\n user_profile = query_db('select * from user where username = ?',\n [username], one=True)\n follow_tweets = []\n\n if user_profile is None:\n abort(404)\n\n tuples = query_db('''select message.* from message, follower where\n follower.whom_id = message.author_id and follower.who_id = ?\n order by message.pub_date desc limit ?''', [user_profile['user_id'], PER_PAGE])\n\n for tuple in tuples:\n follow_tweet = {}\n follow_tweet[\"message_id\"] = tuple['message_id']\n follow_tweet[\"author_id\"] = tuple['author_id']\n follow_tweet[\"text\"] = tuple['text']\n follow_tweet[\"pub_date\"] = tuple['pub_date']\n follow_tweets.append(follow_tweet)\n\n return jsonify({'follow_tweets': follow_tweets}), 200", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def users_being_followed_tweets():\n username = 
request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def api_get_tweets(request, topic):\n bottom_id = request.query_params.get('bottomId', None)\n\n if bottom_id is None:\n tweets = get_first_tweets(topic)\n if tweets:\n for tweet in tweets:\n tweet['data']['id'] = str(tweet['data']['id'])\n return Response({\"tweets\": tweets}, status=status.HTTP_200_OK)\n\n return Response({\"error\": \"topic not supported\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n else:\n bottom_id = int(bottom_id)\n tweets = get_next_tweets(topic, bottom_id)\n if tweets:\n for tweet in tweets:\n tweet['data']['id'] = str(tweet['data']['id'])\n return Response({\"tweets\": tweets}, status=status.HTTP_200_OK)\n\n return Response({\"error\": \"no tweets anymore\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def save_user_tweets(user, n, auth):\r\n t = twitter.Twitter(auth=auth)\r\n print(\"Fetching %i tweets from @%s\" % (n, user))\r\n tweets = t.statuses.user_timeline(screen_name=user, count=n)\r\n print(\" (actually fetched %i)\" % len(tweets))\r\n for tweet in tweets:\r\n save_tweet(tweet, outfile)", "def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()", "def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == '404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())", "def track(twitter, keywords=[], user_ids=[]):\n\n # Prepare for GET request\n 
streaming_url = \"https://stream.twitter.com/1.1/statuses/filter.json\"\n\n # Documentation for filter params:\n # https://dev.twitter.com/docs/streaming-apis/parameters\n params = {\"replies\": \"all\"}\n if keywords:\n params[\"track\"] = keywords\n if user_ids:\n params[\"follow\"] = user_ids\n\n # Create Request.get object\n r = twitter.get(url=streaming_url, params=params, stream = True)\n\n # Iterate over the request\n for line in r.iter_lines():\n if line :\n try:\n # TODO \n # Sometimes it returns a \"disconnect\" obj \n # before closing the stream\n tweet = json.loads(line)\n yield tweet\n except ValueError:\n # Couldn't construct a valid tweet\n pass", "def get_tweets(self):\r\n return self.tweets", "def twitter_get_timeline(self):\n if self.twitter_bearer_token is None:\n return None\n\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=100&screen_name=' + \\\n self.private_data['twitter']['screen_name']\n\n headers = {'Authorization': 'Bearer %s' % self.twitter_bearer_token,\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n\n resp = requests.get(url, headers=headers)\n tweets = []\n if resp.status_code == 200:\n content = json.loads(resp.content)\n for i in range(0, len(content)):\n tweets.append(content[i]['text'])\n else:\n print('ERROR: unable to retrieve timeline')\n print(resp.content)\n\n return tweets", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def tweet_list_view(request, *args, **kwargs):\n\n objs = Tweet.objects.all()\n\n tweets_list = [{\"id\": obj.id, \"content\": obj.content} for obj in objs]\n\n data = {\n\n \"isUser\":False,\n\n \"tweets_list\": tweets_list\n }\n\n return JsonResponse(data)", "def get(self, request, *args, **kwargs):\n oauth_verifier = request.GET['oauth_verifier']\n\n twitter = Twython(\n settings.TWITTER_APP_KEY,\n settings.TWITTER_APP_SECRET,\n request.session['twitter_oauth_token'],\n request.session['twitter_oauth_token_secret']\n )\n\n final_step = twitter.get_authorized_tokens(oauth_verifier)\n\n twitter_oauth_token = final_step['oauth_token']\n twitter_oauth_token_secret = final_step['oauth_token_secret']\n redirect_url = request.session.get(\n 'twitter_login_redirect_url',\n settings.LOGIN_REDIRECT_URL\n )\n user = authenticate(\n twitter_oauth_token=twitter_oauth_token,\n twitter_oauth_token_secret=twitter_oauth_token_secret\n )\n\n if user is None:\n raise ValidationError(\"Invalid Login\")\n\n if not user.is_active:\n raise ValidationError(\"User is inactive\")\n\n auth_login(request, user)\n\n if user.email is None and settings.TWITTER_EMAIL_REQUIRED:\n return redirect('twitter_request_email')\n\n return redirect(redirect_url)", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = 
datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def get(self, woe_id):\n \n consumer_key = config.twitter_api_credentials[\"consumer_key\"]\n consumer_secret = config.twitter_api_credentials[\"consumer_secret\"]\n access_token2 = config.twitter_api_credentials[\"access_token\"]\n access_token_secret = config.twitter_api_credentials[\"access_token_secret\"] \n\n key_secret = '{}:{}'.format(consumer_key, consumer_secret).encode('ascii')\n b64_encoded_key = base64.b64encode(key_secret)\n b64_encoded_key = b64_encoded_key.decode('ascii')\n\n base_url = config.twitter_api_credentials[\"base_url_auth\"]\n auth_url = '{}oauth2/token'.format(base_url)\n auth_headers = {'Authorization': 'Basic {}'.format(b64_encoded_key),\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n auth_data = {'grant_type': 'client_credentials'}\n auth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)\n access_token = auth_resp.json()['access_token']\n \n search_headers = { 'Authorization': 'Bearer {}'.format(access_token) }\n base_url = config.twitter_api_credentials[\"base_url_trend\"]\n url = base_url + woe_id\n response = requests.get(url, headers=search_headers)\n tweet_data = response.json()\n return tweet_data", "def get_tweets(self, start_date, end_date):\r\n pass", "def get_tweet_list(user_handle):\n client = language.LanguageServiceClient()\n\n tweet_list = twitter.get_tweets(handle=user_handle)\n\n if tweet_list[0] == \"34\":\n return tweet_list\n\n for i in range(len(tweet_list)):\n\n content = tweet_list[i].get(\"text\")\n\n document = types.Document(\n content=content, type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n\n # Print the results\n # print_result(annotations)\n\n score = annotations.document_sentiment.score\n magnitude = annotations.document_sentiment.magnitude\n\n tweet_list[i][\"score\"] = score\n tweet_list[i][\"magnitude\"] = magnitude\n\n # print(tweet_list[i])\n\n return tweet_list", "def tweet_url(username, id):\n return 'http://twitter.com/%s/status/%d' % (username, id)", "def get_tweet(self, id):\r\n return self.tweets[id]", "def get_tweets():\n\n return Tweet.query.all()", "def tweets(self, start= None, interval= None):\n if start == None :\n return tweet.objects.filter(user = self)\n if interval == None :\n return tweet.objects.filter(Q(user = self) & Q(timestamp__gte=start) & Q(is_reply=False) & Q(is_quote=False) & Q(is_retweet=False))\n return tweet.objects.filter(Q(user = self) & Q(timestamp__gte=start) & Q(timestamp__lte=start+interval) & Q(is_reply=False) & Q(is_quote=False) & Q(is_retweet=False))", "def tweet(user):\n api = get_api(user)\n msg = 'I used hackt to follow @hackerschool batches on twitter. 
You can too at http://bit.ly/hs_hackt'\n\n try:\n api.PostUpdate(msg)\n except twitter.TwitterError as error:\n return {'msg': error.message[0]['message']}", "def on_GET(self, request, target_user_id):\n target_user = UserID.from_string(target_user_id)\n requester = yield self.auth.get_user_by_req(request)\n is_admin = yield self.auth.is_server_admin(requester.user)\n\n if not is_admin:\n raise AuthError(403, \"You are not a server admin\")\n\n # To allow all users to get the users list\n # if not is_admin and target_user != auth_user:\n # raise AuthError(403, \"You are not a server admin\")\n\n if not self.hs.is_mine(target_user):\n raise SynapseError(400, \"Can only users a local user\")\n\n term = request.args.get(\"term\")[0]\n if not term:\n raise SynapseError(400, \"Missing 'term' arg\")\n\n logger.info(\"term: %s \", term)\n\n ret = yield self.handlers.admin_handler.search_users(\n term\n )\n defer.returnValue((200, ret))", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... 
%s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def load_tweets(self, max_items=10000, user=None):\n for name, info in self.users.items():\n try:\n os.mkdir(self.root + info['party'].lower().replace(' ', '_'))\n except FileExistsError:\n pass\n \n filepath = self.root + info['party'].lower().replace(' ', '_')\n filepath = filepath + '/' + name.lower().replace(' ', '')\n try:\n print(f'Reading tweets from {name}')\n user = info['screen_name']\n curs = tweepy.Cursor(self.api.user_timeline,\n screen_name=user,\n count=200,\n tweet_mode=\"extended\"\n ).items(max_items)\n\n with open(filepath + '.jsonl', 'w') as f:\n for status in curs:\n tweet = status._json\n json_dump_line(tweet, f)\n \n except tweepy.TweepError as exc:\n print(exc)\n os.remove(filepath + '.jsonl')", "def get_user_timeline(self, username,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/statuses/user_timeline.json\")\n response = self.session.get(\n url,\n params={\n \"screen_name\": username,\n \"count\": count,\n # \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data", "def execute(self, *args, **kwargs):\n try:\n self.timeline_tweets = self.api.user_timeline(\n kwargs['screen_name'])\n except TweepError as user_timeline_error:\n print(user_timeline_error)\n self.user_timeline_tweets_status = False", "def getTweetsPerUser(self, fromDate, toDate, number):\n return self.session.query(func.count(User.id), User.screen_name).\\\n join(Tweet).group_by(User.id).\\\n order_by(desc(func.count(User.id))).\\\n filter(Tweet.created_at > fromDate).\\\n filter(Tweet.created_at < toDate)[0: number]", "def search_tweets(request):\n return render(request, 'ede/search.html')", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')", "def get(self, request, username):\n # Retrieve the user from the user table if the user exists\n try:\n user_details = 
User.objects.get(username=username)\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(user_details)\n # Return the follower details in a response object\n return Response(\n {\n \"message\": get_followers_found_message(username),\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n )\n except User.DoesNotExist:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['USER_NOT_FOUND']},\n status=status.HTTP_404_NOT_FOUND\n )", "def user_timeline(username):\n # profile_user = query_db('select * from user where username = ?',\n # [username], one=True)\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n profile_user = mongo.db.users.find_one({'username': username})\n # print \"inside username\", profile_user\n if profile_user is None:\n abort(404)\n followed = False\n if g.user:\n # followed = query_db('''select 1 from follower where\n # follower.who_id = ? and follower.whom_id = ?''',\n # [session['user_id'], profile_user['user_id']],\n # one=True) is not None\n followed = mongo.db.users.find_one(\n {'_id': session['user_id'],\n 'follows': {'$in': [profile_user['_id']]}}) is not None\n if redis_obj.get(profile_user):\n pKey = pickle.loads(redis_obj.get(profile_user))\n return render_template('timeline.html', messages=pKey, followed=followed,\n profile_user=profile_user)\n else:\n if g.user:\n redis_obj.setex(session['user_id'], pickle.dumps(\n user_query(profile_user)), 60)\n return render_template('timeline.html', messages=user_query(profile_user), followed=followed,\n profile_user=profile_user)", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def tweet_details(request, pk):\n try:\n tweet = Tweet.objects.get(pk=pk)\n except Tweet.DoesNotExist:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = TweetSerializer(tweet, context={'request': request})\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n if request.method == 'DELETE':\n if request.user == tweet.user: \n tweet.delete()\n return Response({\"message\": \"The tweet has been deleted!\"}, \n status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"You're not authorized\"}, \n status=status.HTTP_403_FORBIDDEN)", "def print_user_archive(user):\n archive_generator = rest.fetch_user_archive(user)\n for page in archive_generator:\n for tweet in page:\n print_tweet(tweet)", "def get(self, user_id):\n\n # Users can fetch only their own teams\n if current_user.id != int(user_id):\n abort(403)\n\n user = 
User.get_if_exists(user_id)\n\n tribes = set()\n if 'role' not in request.args:\n tribes.update(user.editing)\n tribes.update([l.team.tribe for l in user.teams])\n elif request.args['role'] == 'editor':\n tribes = user.editing\n elif request.args['role'] == 'member':\n tribes = (\n Tribe.query\n .join(Tribe.teams)\n .join(Team.users)\n .filter_by(manager=False)\n .order_by(Tribe.name)\n .all()\n )\n else:\n abort(403)\n return\n\n tribes = list(tribes)\n tribes.sort(key=lambda t: t.name)\n\n response = jsonify([t.serialize() for t in tribes])\n response.status_code = 200\n return response", "def stream_tweets(api_token: str, api_secret: str, access_token: str, access_secret: str, saver,\n keywords: list = None, users: list = None, locations: list = None, stall_warning: bool = False):\n\n auth = OAuthHandler(api_token, api_secret)\n auth.set_access_token(access_token, access_secret)\n api = API(auth)\n listener = TwitterListener(manager=saver, api=api)\n stream = Stream(auth=auth, listener=listener)\n log.write_log('Streaming started', 'execution')\n stream.filter(track=keywords, follow=users, locations=locations, stall_warnings=stall_warning)", "def scrape_from_user(acc, num, path='data/tweet_ids.txt'):\n print('Collecting tweets from {}'.format(acc[num]))\n\n tweets = []\n new_tweets = []\n\n new_tweets = _api.user_timeline(screen_name=acc[num], count=200)\n tweets.extend(new_tweets)\n\n oldest = tweets[-1].id - 1\n\n while len(new_tweets) > 0:\n new_tweets = _api.user_timeline(screen_name=acc[num], count=200,\n max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n print('{} tweets collected so far'.format(len(tweets)), end='\\r')\n\n with open(path, 'a+') as f:\n for x in range(len(tweets)):\n f.write(str(tweets[x].id_str))\n f.write('\\n')\n\n print('\\nDone.')", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : 
item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def get_follows_route(request):\n\n db_conn = request['db_conn']\n current_user = get_current_user(request)\n user_id = request['params'].get('user_id')\n if user_id:\n user = get_user({'id': user_id}, db_conn)\n if not user:\n return abort(404)\n if (user != current_user and\n user['settings']['view_follows'] != 'public'):\n return abort(403)\n else:\n user = current_user\n if not user:\n return abort(401)\n params = dict(**request['params'])\n params['user_id'] = user['id']\n follows = list_follows(params, db_conn)\n return 200, {\n 'follows': [deliver_follow(follow, access='private')\n for follow in follows]\n }", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def user_timeline(username=None): # pylint: disable=unused-argument\n form = PostTweetForm()\n if form.validate_on_submit():\n try:\n current_user.post_tweet(form.tweet.data)\n flash('Tweet successfully posted')\n except ValueError as excep:\n flash(str(excep))\n return render_template('timeline.html',\n general=False,\n show_username=True,\n form=form)\n\n return render_template('timeline.html',\n general=False,\n show_username=True,\n form=form)", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_tweets(self, start_date, end_date):\r\n # get tweets from api\r\n config = crawler.APIConfig()\r\n config.set_api_key(\"8e1618e9-419f-4239-a2ee-c0680740a500\")\r\n config.set_end_time(end_date)\r\n 
config.set_filter(self.region)\r\n config.set_start_time(start_date)\r\n return crawler.FetchTweets(config).fetch()", "def retweet(tweet):\n\n twitter.PostRetweet(tweet.id, trim_user=False)\n\n return", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def index():\n\n return render_template(\"index.html\", tweets=[])", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n 
connection.commit()\n connection.close()\n\n return rows_added", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def user(request, user_id):\n raise NotImplementedError", "def on_tweet(self, tweet):\n pass", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "async def twitter_fetch(self, ctx, handle, limit: int=1):\n sane_handle = handle.lower().lstrip('@')\n # Get the latest tweets from the user\n try:\n to_display = await self.get_latest_valid(screen_name=sane_handle, limit=limit)\n except tweepy.TweepError as e:\n # The channel is probably protected\n if e.reason == 'Not authorized.':\n raise TwitterError('This channel is protected, its tweets cannot be fetched.') from e\n if e.api_code == 34:\n raise TwitterError('User \"{}\" not found.'.format(handle)) from e\n else:\n log.error(str(e))\n raise TwitterError('Unknown error from the Twitter API, this has been logged.') from e\n\n # Display the kept tweets\n for tweet in to_display:\n embed = await self.prepare_embed(tweet)\n await self.bot.say(embed=embed)", "def crawlAccount(target):\n\n\t# connect Twitter api\n\ttwitter = connectTwitter()\t\n\ttry:\n\t\tuser_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=False, exclude_replies=False)\n\texcept TwythonError:\n\t\tsys.exit('Received 404 for %s. Account does not exist or is banned.' % target)\n\t\n\tuser_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=True, exclude_replies=False)\t\n\ttweets = []\n\tids = []\n\n\n\t# stop this loop\n\twhile len(ids) < user[0]['statuses_count']:\n\t\tif len(user_timeline) == 0:\n\t\t\tprint '[!] No more tweets available. Ending scraper.\\n'\n\t\t\tbreak\n\n\t\tfor tweet in user_timeline:\n\t\t\tids.append(tweet['id'])\t\t\t\n\t\t\ttweets.append(tweet)\n\n\t\t\twith open('../Raw data/tweets/%s.json' % screen_name, 'a') as json_out:\n\t\t\t\tjson.dump(tweet, json_out)\n\t\t\t\tjson_out.write('\\n')\n\n\t\tprint '\\t[i] Found %i tweets so far.' % (len(ids))\n\t\t\n\t\ttime.sleep(5)\n\t\tuser_timeline = twitter.get_user_timeline(screen_name=screen_name, count=200, max_id=min(ids) - 1, include_rts=True, exclude_replies=False)\t\n\t\t\n\telse:\n\t\tprint '[!] All tweets scraped. 
Ending scraper.\\n'\n\t\treturn", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def get(self):\n params = request.args.to_dict()\n _format = params.pop('format', None)\n results = TwitterStatus.filter_tweets(**params)\n from app.utils import write_to_csv\n if _format == 'csv':\n fieldnames = FIELD_TYPECAST_FUNC_MAPPING.keys()\n return write_to_csv(results, fieldnames)\n else:\n return {\n 'error': False,\n 'remark': 'success',\n 'data': results\n }", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def getUsersLookup(self, **kwargs):\n screen_name = handleShouldBeList(kwargs.get('screen_name', None))\n user_id = handleShouldBeList(kwargs.get('user_id', None))\n\n params = {\n 'include_entities': kwargs.get('include_entities', None),\n 'tweet_mode': kwargs.get('tweet_mode', None)\n }\n\n if screen_name:\n params['screen_name'] = ','.join(screen_name)\n\n if user_id:\n params['user_id'] = ','.join(str(uid) for uid in user_id)\n \n query = createQuery(params)\n uri = self.api_url + '/users/lookup.json'\n\n response = self.session.post(uri + query).json()\n return response", "def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]", "def search(request):\n\t\n\t# User's query\n\tquery = request.GET.get('query')\n\n\t# Search for 50 most popular tweets about user's query\n\ttweets = tweepy.Cursor(api.search, q=query, lang=\"en\", tweet_mode='extended', include_entities=True, result_type='popular').items(50)\n\n\t# Search for 20 most relevant news about user's query\n\tall_news = newsapi.get_everything(q=query, language='en', sort_by='relevancy')\n\n\t# Search for 25 hottest subreddits about user's query\n\tsubreddit = reddit.subreddit('all')\n\treddit_news = subreddit.search(query, limit=25, sort='hot')\n\n\tcontext = {\n\t\t\"tweets\": tweets, # most popular tweets\n\t\t\"all_news\": all_news, # most relevant google news\n\t\t\"reddit_news\": reddit_news # hottest subreddits\n\t}\n\n\treturn render(request, 'hashtrend/search.html', context)", "async def twitter(self, ctx, *, emoji: str):\n await self.get_emoji(ctx, \"twitter\", emoji)", "def post(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n tweets = request.data.get('tweets', None)\n if tweets is not None:\n api = load_api(request)\n try:\n api.update_status(tweets)\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n return 
Response({\"message\": \"Your tweets has been updated\"}, status=status.HTTP_201_CREATED)", "def url_():\n try:\n url = request.args.get('url')\n if not url:\n raise Exception('Expected url parameter')\n\n try:\n credentials = get_twitter_credentials()\n params = {'q': url, 'count': 200}\n tweets = search_recent(params, credentials=credentials)\n except TwitterAuthError:\n # User not authenticated. Re-initiating Twitter auth.\n if 'html' in request.headers['Accept'] and \\\n request.args.get('_format') != 'json':\n return redirect(url_for('auth_check') + \\\n '?redirect=%s' % request.url)\n session_pop('access_token')\n session_pop('access_token_secret')\n return url_()\n tweets = dedupe_tweets(tweets)\n grouped = group_tweets_by_text(tweets)\n for k, tweet_list in grouped.iteritems():\n grouped[k].sort(key=lambda t: (t.retweet_count, t.created_at),\n reverse=True)\n groups = sorted(grouped.items(), key=lambda t: (-1*len(t[1]), t[0]))\n data = {'error': '', 'tweets': groups}\n return render(data, template='url.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')", "def tweet_detail_view(request, tweet_id, *args, **kwargs):\n\n\n data = {\n \n \"id\": tweet_id,\n }\n \n try:\n obj = Tweet.objects.get(id=tweet_id)\n data[\"content\"] = obj.content\n status = 200\n\n except:\n data[\"message\"] = \"Not found\"\n status = 404\n\n return JsonResponse(data, status=status) # json.dumps content_type=\"/application_json\"", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def new_tweets(request):\n\n twitter_api = twitter.TwitterAPI(\"air quality\")\n\n if request.method == 'GET':\n max_items = request.GET.get('max_items') or _DEFAULT_MAX_ITEMS\n\n try:\n latest_tweet = models.Sentiment.objects.filter(is_tweet=True).latest('created')\n tweet_id = latest_tweet.tweet_id\n tweets = twitter_api.retrieve_new(tweet_id, max_items)\n except ObjectDoesNotExist:\n tweets = twitter_api.retrieve(max_items)\n\n # Serialize\n deserializer = models.SentimentSerializer()\n\n tweet_objs = []\n for idx, tweet_data in enumerate(tweets):\n tweet = deserializer.create(tweet_data)\n tweet.is_tweet = True\n tweet.save()\n tweet_objs.append(tweet)\n\n serialized = models.SentimentSerializer(tweet_objs, many=True)\n\n return JSONResponse(serialized.data)\n\n return JSONResponse([], status=400)", "def filter_tweet(tweet):\n if not filter_tweet_core(tweet):\n return False\n if bannedusers.search(tweet['user']['screen_name']) or (\n 'retweeted_status' in tweet and bannedusers.search(tweet['retweeted_status']['user']['screen_name'])):\n return False\n if tweet['user']['screen_name'] == credentials['username']: # Do not match self tweets :-)\n return False\n return True", "def FromTwitterUser(cls, user):\n return Team(twitter_id=user.id_64)", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": \"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = 
result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def fetch_tweets(self, screen_name, count):\n return {}", "def get_posts(self, userid, username):\n dict_json = {}\n x = 0\n outfile_name = \"tweetsFrom\" + username + \".json\"\n posts = api.GetUserTimeline(user_id=userid, count=200)\n text_list = [p.text for p in posts]\n for text in text_list:\n dict_json[x] = text\n x += 1\n with open(outfile_name, \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()", "def get_twitter():\n \n return TwitterAPI(consumer_key, consumer_secret, access_token, access_token_secret)", "def get(self, request, pk, *args, **kwargs):\n try:\n user = User.objects.get(id=pk)\n except User.DoesNotExist:\n raise NotFound(\"user with user id {0} does not exist.\".format(pk))\n referral_link = self._generate_referral_link(user)\n subject = REFERRAL_SUBJECT\n message = REFERRAL_MESSAGE.format(**{\"url\": referral_link})\n result = {\"subject\": subject, \"message\": message, \"url\": referral_link}\n return Response({\"results\": result})", "def request(self, method, url):\n\t\ttr = TwitterRequest( method.upper(), url )\n\t\treturn self.get_response( tr )", "def populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")", "def get(self,**kwargs):\n # obtain parameters\n parser = reqparse.RequestParser(bundle_errors=True)\n #parser.add_argument(\"user_id\", type=str, required=True, help=\"The user ID of the wish lister is a String.\")\n parser.add_argument(\"study_id\", type=int, required=True,\n help=\"The study ID of the study being wish listed is an integer.\")\n returned_args = parser.parse_args()\n user_id = kwargs[\"user_id\"] #returned_args.get(\"user_id\", None)\n study_id = returned_args.get(\"study_id\", None)\n\n # update the user data\n Auxiliary.addWishlist(user_id, study_id)\n # return success\n return jsonify({\"Success\": True})", "def user_timeline(username):\n profile_user = query_db('select * from user where username = ?',\n [username], one=True)\n if profile_user is None:\n abort(404)\n followed = False\n if g.user:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [session['user_id'], profile_user['user_id']],\n one=True) is not None\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [profile_user['user_id'], PER_PAGE]), followed=followed,\n profile_user=profile_user)" ]
[ "0.7213664", "0.684018", "0.6731007", "0.67143995", "0.6648292", "0.65042245", "0.63557523", "0.6336596", "0.62716496", "0.624116", "0.6206272", "0.61798954", "0.61040026", "0.6077563", "0.6045129", "0.6031549", "0.60101897", "0.6005784", "0.6003022", "0.5988208", "0.5985817", "0.5968261", "0.5902367", "0.584767", "0.5828404", "0.5814746", "0.58075523", "0.58014244", "0.57681537", "0.5767868", "0.5765643", "0.5760638", "0.5731119", "0.5724879", "0.5723547", "0.5704071", "0.56888145", "0.56851476", "0.5675325", "0.5619803", "0.5609123", "0.5579741", "0.5573102", "0.5561329", "0.5560908", "0.55545455", "0.5553473", "0.5545028", "0.5533792", "0.5529115", "0.55248094", "0.5521905", "0.5498053", "0.54905987", "0.54869664", "0.54328567", "0.5419334", "0.5418461", "0.54164916", "0.5411677", "0.54026926", "0.53999156", "0.53954166", "0.53791803", "0.5377205", "0.53765225", "0.5370009", "0.5363266", "0.53539747", "0.53338516", "0.5328666", "0.53247494", "0.53130805", "0.5311616", "0.5300519", "0.5299678", "0.52959424", "0.52918947", "0.5280825", "0.52763134", "0.5273768", "0.5260363", "0.5247983", "0.5220829", "0.52186537", "0.5215233", "0.5215208", "0.5214999", "0.5201633", "0.5201031", "0.51877075", "0.5180312", "0.5171401", "0.5167716", "0.51675016", "0.5162205", "0.515498", "0.515436", "0.5152204", "0.515211" ]
0.81342083
0
Retrieve a new eagle eye auth key and subdomain information
def _get_eagleeye_session(carson_api, building_id):
    url = C_API_URI + C_EEN_SESSION_ENDPOINT.format(building_id)
    session = carson_api.authenticated_query(url)
    return session.get('sessionId'), session.get('activeBrandSubdomain')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n apikey = config.get('auth', 'apikey')\n return (\"apikey\", apikey)", "def api_key(request):\r\n user_acct = request.user\r\n return _api_response(request, {\r\n 'api_key': user_acct.api_key,\r\n 'username': user_acct.username\r\n })", "def _get_auth_string(self):", "def get_saucelabs_username_and_key():\r\n return {\"username\": settings.SAUCE.get('USERNAME'), \"access-key\": settings.SAUCE.get('ACCESS_ID')}", "def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']", "def extractCredentials(self, request):\n \n creds = {}\n# import pdb\n# pdb.set_trace()\n if self.jid_auth_header in request.keys():\n dn = request.get(self.jid_auth_header, '')\n if not bool(dn):return creds\n # fetch remote ip\n creds['clientip'] = get_ip(request)\n # Looking into the cookie first...\n if self.cookie_name in request.keys():\n try:\n creds[\"cookie\"] = binascii.a2b_base64(\n request.get(self.cookie_name)\n )\n except binascii.Error:\n # If we have a cookie which is not properly base64 encoded it\n # can not be ours.\n return creds\n else:\n ticket = creds[\"cookie\"] \n ticket_data = self._validateTicket(ticket)\n if ticket_data is not None:\n (digest, userid, tokens, user_data, timestamp) = ticket_data\n creds[\"login\"] = userid\n creds[ 'password' ] = userid\n creds['init_login'] = False\n creds[\"source\"] = \"emc.session\" \n return creds \n \n login_pw = self.extractAuthGWInfo(dn) \n if login_pw is not None:\n id, name, idnumber = login_pw\n creds[ 'login' ] = id\n creds[ 'password' ] = idnumber \n creds[\"cookie\"] = \"\"\n creds['init_login'] = True\n creds[\"url\"] = request['URL']\n creds[\"source\"] = \"emc.session\"\n return creds\n\n else:\n if self.cookie_name in request.keys():\n\n try:\n creds[\"cookie\"] = binascii.a2b_base64(\n request.get(self.cookie_name)\n )\n except binascii.Error:\n # If we have a cookie which is not properly base64 encoded it\n # can not be ours.\n return creds\n else:\n ticket = creds[\"cookie\"] \n ticket_data = self._validateTicket(ticket)\n if ticket_data is not None:\n# (digest, userid, tokens, user_data, timestamp) = ticket_data\n #fire a logout event and call resetCredentials\n logging.info(\"logout\")\n from plone import api\n url = \"%s/index.html\" % api.portal.get().absolute_url()\n if url == request['URL']:\n logout(request)\n self.resetCredentials(request, request['RESPONSE']) \n return creds\n else:\n return creds\n \n else:\n return creds", "def extractAuthGWInfo(self,dn):\n \n# dn = request.get(self.jid_auth_header, '')\n dn = transfer_codec(dn)\n userName,idNumber = split_idNumber(dn)\n loginid = idNumber\n# loginid = transfer_codec(loginid) \n# creds['remote_host'] = request.get('REMOTE_HOST', '')\n return loginid,userName,idNumber", "def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_le_api_key(auth):\n if auth.get('rw_api_key'):\n return auth.get('rw_api_key')\n else:\n return auth.get('ro_api_key')", "def public_key(self):", "def private_key(self):", "def get_basic_auth(user=\"\", key=\"\"):\n s = user + \":\" + key\n return s.encode(\"base64\").rstrip()", "def get_access_key():\n return get_config_handler().get_access_key()", "def readappkeys():\n appid = os.environ.get('APPID')\n appsecret = os.environ.get('APPSECRET')\n redirecturi = os.environ.get('REDIRECTURI')\n\n return(appid, appsecret, redirecturi)", "def 
get_api_key(api_key):\n api.get(api_key)", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def getAuthKey(self):\r\n auth_key = 'Que despierte la Red'\r\n assert len(auth_key) == self.AUTH_KEY_LEN\r\n return auth_key", "def extractCredentials(self, request):\n\n cookie = request.cookies.get('.ASPXAUTH')\n creds = {}\n creds['cookie'] = cookie\n creds['plugin'] = self.getId()\n\n return creds", "def get(cls, subdomain, key):\n key_name = subdomain + ':' + key\n return cls.get_by_key_name(key_name)", "def _v2_auth(self, url):\n return {\"auth\": {\n \"passwordCredentials\": {\"username\": self.user,\n \"password\": self.secret}}}", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def set_auth(self):\n timestamp = str(int(time.time()))\n unique = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(16))\n hashstr = sha1((self.callerid + timestamp +\n self.privatekey + unique).encode('utf8')).hexdigest()\n logger.debug(\"Time from api {}\".format(timestamp))\n\n return {\"callerId\": self.callerid,\n \"time\": timestamp,\n \"unique\": unique,\n \"hash\": hashstr\n }", "def email_key(self):\r\n url = '{0}/emailKey/generate'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json", "def auth(self):\n return auth.get_auth()", "def test_get_cloud_organization_api_key(self):\n pass", "def getAuthKey( self ):\n d = {\n \"frob\" : FLICKR[ \"frob\" ],\n \"perms\" : \"delete\"\n }\n sig = self.signCall( d )\n url = self.urlGen( api.auth, d, sig )\n ans = \"\"\n try:\n webbrowser.open( url )\n print(\"Copy-paste following URL into a web browser and follow instructions:\")\n print(url)\n ans = raw_input(\"Have you authenticated this application? 
(Y/N): \")\n except:\n print(str(sys.exc_info()))\n if ( ans.lower() == \"n\" ):\n print(\"You need to allow this program to access your Flickr site.\")\n print(\"Copy-paste following URL into a web browser and follow instructions:\")\n print(url)\n print(\"After you have allowed access restart uploadr.py\")\n sys.exit()", "def get_subdomain(self):\n return self.key().name().split(':', 1)[0]", "def test_postive_get_auth_horizon_check_keys(self):\n r = self.res.get('/auth/config/'+utils.partner, headers=utils.headers)\n logging.info(\"Return response is '%s'\" % r)\n # convert string to dictionary\n rd = ast.literal_eval(r)\n logging.info(\"Return response in dictionary format is '%s'\" % rd)\n self.assertEqual(self.res.response.status, 200)\n keys = ['type', 'web_endpoint', 'client_endpoint', 'org_name']\n self.assertTrue(utils.is_same_array(keys, rd.keys()), \"Keys are not correct!\")", "def get_appengine_credentials():\n return get_credentials()", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "def get(self, url):\n auths = self._read_all()\n try:\n self.credential = auths[url]\n return self.credential[\"auth\"]\n except KeyError:\n pass\n return \"\"", "def get_cloudhole_key():\n cloudhole_key = None\n try:\n r = urllib2.Request(\"https://cloudhole.herokuapp.com/key\")\n r.add_header('Content-type', 'application/json')\n with closing(opener.open(r)) as response:\n content = response.read()\n log.info(\"CloudHole key: %s\" % content)\n data = json.loads(content)\n cloudhole_key = data['key']\n except Exception as e:\n log.error(\"Getting CloudHole key error: %s\" % repr(e))\n return cloudhole_key", "def get_api_key(\n host: str, username: str, password: str, realm: str = \"wuxinextcode.com\"\n) -> str:\n verify_ssl = not os.environ.get(\"DISABLE_SDK_CLIENT_SSL_VERIFY\", False)\n body = {\n \"grant_type\": \"password\",\n \"client_id\": DEFAULT_CLIENT_ID,\n \"password\": password,\n \"username\": username,\n \"scope\": \"offline_access\",\n }\n host = host_from_url(host)\n url = urljoin(host, \"auth\", \"realms\", realm, \"protocol/openid-connect/token\")\n log.info(\"Using auth server '%s'\", url)\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n log.debug(\"Calling POST %s with headers %s and body %s\", url, headers, body)\n resp = requests.post(url, headers=headers, data=body, verify=verify_ssl)\n log.debug(\"Response (%s): %s\", resp.status_code, resp.text)\n if resp.status_code != 200:\n try:\n description = resp.json()[\"error_description\"]\n except Exception:\n description = resp.text\n raise InvalidToken(f\"Error logging in: {description}\") from None\n\n api_key = resp.json()[\"refresh_token\"]\n return api_key", "def get_api_key(site):\n\n # Assumes the configuration is available via a config module\n return config.get_key(site)", "def auth_domain(request):\n return request.registry.settings.get('h.auth_domain', request.domain)", "def 
test_add_api_key_to_org(self):\n pass", "def _apikey():\n return __opts__.get(\"bamboohr\", {}).get(\"apikey\", None)", "def get_authentication_data():\n\n sigrhe_login = config_parser.get(\"sigrhe\", \"login\")\n sigrhe_password = config_parser.get(\"sigrhe\", \"password\")\n\n return sigrhe_login, sigrhe_password", "def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')", "def identify_auth():\r\n from requests.auth import HTTPBasicAuth\r\n from requests.auth import HTTPDigestAuth\r\n # HTTPBasicAuth Auth Method\r\n response = requests.get(base_url + '/basic-auth/51zxw/8888', auth=HTTPBasicAuth('51zxw', '8888'))\r\n print(response.status_code)\r\n print(response.text)\r\n\r\n # HTTPDigestAuth Auth Method\r\n response = requests.get(base_url + '/digest-auth/auth/zwx/6666', auth=HTTPDigestAuth('zwx', '6666'))\r\n print(response.status_code)\r\n print(response.text)\r\n print(response.json())", "def trello_api_key():\n return TRELLO_API_KEY", "def _get_endpoint_payload():\n endpoint = env['GDRIVE_URL']\n key = env['GDRIVE_KEY']\n headers = {'x-api-key': key} if key else {}\n return dict(headers=headers, route=endpoint)", "def _get_client_info():\n if hasattr(request.authorization, 'username'):\n auth_user = request.authorization.username\n else:\n auth_user = 'Unknown'\n info = request.headers\n origin_string = info.get(\"User-Agent\", \"\")\n origin_props = {}\n if origin_string:\n try:\n origin_props = dict(\n [_.split(\"/\", 1) for _ in origin_string.split()]\n )\n except ValueError:\n pass\n prog_name = origin_props.get(\"prog_name\", \"Unknown\")\n uuid = origin_props.get(\"uuid\", uuid4())\n host = info.get(\"Host\", \"Unknown\")\n if info.get(\"From\") and \"@\" in info[\"From\"]:\n user = info[\"From\"].split(\"@\")[0]\n else:\n user = (\"Unknown\")\n return auth_user, prog_name, user, host, uuid", "def async_get_api_key(self, splunk_cookie, auth_header):\n uri = self.get_api_key_uri()\n return self.async_get_request(uri, headers={'splunkd_8089':splunk_cookie}, auth_header=auth_header)", "def get_key_info(self, api_key, include_key=False):\n\t\ttry:\n\t\t\tvalidation.required(api_key, 'api_key')\n\t\texcept errors.ValidationError, ex:\n\t\t\tself.log.warning(\"Validation failure: %s\" % str(ex))\n\t\t\traise errors.APIError, str(ex)\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\twhere\n\t\t\t\tapi_key = %s\n\t\t\t\"\"\", (api_key, ), single_row=True)", "def auth_domain(self):\n return self.__auth_domain", "def __init__(self, subdomain, api_key):\r\n tmpl = '{0}.cartodb.com/api'\r\n self.apiroot = http.quote_any(tmpl.format(port.to_u(subdomain)))\r\n self.apiroot = 'https://' + self.apiroot\r\n\r\n self.api_key = api_key\r\n self.add_filter(self.add_api_key)", "def __init__(self):\n self._api_key = os.environ.get('IDCF_DNS_API_KEY')\n self._secret_key = os.environ.get('IDCF_DNS_SECRET_KEY')", "def recipient_public_key(self):", "def getRequest(request):\n return request.session.get('openid_request')", "def apikey(serv):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '{0}.key'.format(serv))\n key = open(path, \"r\").read().rstrip()\n return key", "def get_keystone_token():\n req_json = {\n 'auth': {\n 'passwordCredentials': {\n 'username': CFG.username,\n 'password': CFG.password\n },\n },\n 
}\n\n header = '{\"Host\": \"identity.api.rackspacecloud.com\",'\n header += '\"Content-Type\": \"application/json\",\"Accept\":\"application/json\"}'\n url = CFG.auth_url\n\n response = http.post(url=url, header=header, body=req_json)\n response_body = json.loads(response.text)\n\n auth_token = response_body['access']['token']['id']\n\n return auth_token", "async def test_dev_fetch_api_key(client):\n params = [('username', 'iago@zulip.com')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/api/v1/dev_fetch_api_key',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def authenticate(self, request: web.Request) -> Dict[str, Any]:", "def default_authority(request):\n return request.registry.settings.get(\"h.authority\", request.domain)", "def default_authority(request):\n return request.registry.settings.get(\"h.authority\", request.domain)", "def read_api_key():\n script_path = os.path.dirname(os.path.realpath(__file__)) \n config = open(script_path + '/config', 'r')\n api_key = config.readline().rstrip()\n config.close()\n return(api_key)", "def test_get_test_organization_api_key(self):\n pass", "def _get_api_key_from_config():\n return b64decode(config['app']['auth']['api_key_secret'].encode())", "def test_get_user_api_keys(self):\n pass", "def getHostKey(instance):\n return instance['hostname']", "def credential_get(uniqueID: str):\n\n cert = safeisland.certificate(uniqueID)\n return {\"payload\": cert}", "def getauth_process():\n global logger\n\n p = reqparse.RequestParser()\n\n # answer when requested as json in a post\n p.add_argument('tool_id' , type=str, location='json')\n p.add_argument('api_key', type=str, location='json')\n p.add_argument('card_id', type=str, location='json')\n\n # answer when requested on url \n #p.add_argument('rdr_id',type=str)\n #p.add_argument('api_key',type=str)\n #p.add_argument('card_id',type=str)\n \n # get passed params \n args = p.parse_args()\n\n #logger.info('getauth ' + 'ip:' + request.remote_addr + ' api_key:' + args['api_key'])\n #logger.info('getauth ' + ' card_id:' + args['card_id'])\n\n args.setdefault('api_key','')\n args.setdefault('card_id','')\n args.setdefault('tool_id','')\n\n return(getauth_from_db(args))", "def get_auth(self):\n # Only return accepted keys from the auth_keys dictionary\n # This is to prevent exceptions thrown from keystone session\n returnDict = {}\n for key in self.creds:\n if key in self.auth_keys[self.api_version]:\n returnDict[key] = self.creds[key]\n return returnDict", "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header", "def test_get_organization_from_api_key(self):\n pass", "def __init__(self, subdomain, api_key, api_secret=None,\r\n access_token=None, access_token_secret=None):\r\n tmpl = '{0}/api/v2'\r\n if '.' 
not in subdomain:\r\n subdomain += '.desk.com'\r\n self.apiroot = http.quote_any(tmpl.format(port.to_u(subdomain)))\r\n self.apiroot = 'https://' + self.apiroot\r\n\r\n self.oauth = auth.OAuth(access_token, access_token_secret,\r\n api_key, api_secret)\r\n\r\n self.add_filter(self.use_json)\r\n # authenticate has to be the last filter, because anything that\r\n # modifies the request after it's signed will make the signature\r\n # invalid!\r\n self.add_filter(self.authenticate)", "def get_oauth_data():", "def get_api_key(self, email: str, password: str) -> json:\n\n headers = {\n 'email': email,\n 'password': password\n }\n res = requests.get(self.base_url + 'api/key', headers=headers)\n status = res.status_code\n result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def __addAuthParms(self, request_parms):\n ts = str(time.time())\n hashbase = ts+self.conf.getParm(\"private_key\")+self.conf.getParm(\"public_key\")\n hashdigest = hashlib.md5(hashbase.encode('ascii')).hexdigest()\n res = {'ts': ts, 'hash': hashdigest, 'apikey': self.conf.getParm(\"public_key\")}\n for it in request_parms:\n res[it] = request_parms[it]\n return res", "def get_key(self, user, api_key):\n return True", "def callback__authenticate_get(req, test_env=test_env):\n assert req.url.startswith(OAUTH1__URL_AUTHORITY_AUTHENTICATE)\n qs = req.url.split(\"?\")[1]\n qs = dict(parse_qsl(qs))\n\n testapp = test_env[\"testapp_authority\"]\n res = testapp.get(\n \"/authority/oauth1/authorize?oauth_token=%s\" % qs[\"oauth_token\"],\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)", "async def authenticate(hass: core.HomeAssistant, host, port, servers):\n\n hub = RoonHub(hass)\n (token, core_id, core_name) = await hub.authenticate(host, port, servers)\n if token is None:\n raise InvalidAuth\n\n return {\n CONF_HOST: host,\n CONF_PORT: port,\n CONF_ROON_ID: core_id,\n CONF_ROON_NAME: core_name,\n CONF_API_KEY: token,\n }", "def friendly_name(self):\n return \"ECDSA CERTIFICATE\"", "def get_request_key(request):\n\n data = {\n 'urlaccess': request.build_absolute_uri().replace('http://', 'https://'),\n 'service': settings.TEQUILA_SERVICE,\n 'request': 'name,firstname,email,uniqueid'\n }\n\n response = do_query('createrequest', data)\n\n try:\n key = re.search('key=(.*)', response.text).group(1)\n except:\n key = None\n\n if key:\n return key\n else:\n raise Exception(\"Unable to find a key for tequila request\")", "def _get_auth_info_for_id_or_from_request(\n sub_type=str, user=None, username=None, db_session=None\n):\n db_session = db_session or current_app.scoped_session()\n\n # set default \"anonymous\" user_id and username\n # this is fine b/c it might be public data or a client token that is not\n # linked to a user\n final_user_id = None\n if sub_type == str:\n final_user_id = sub_type(ANONYMOUS_USER_ID)\n final_username = ANONYMOUS_USERNAME\n\n token = \"\"\n try:\n if user:\n final_username = user.username\n final_user_id = sub_type(user.id)\n elif username:\n result = query_for_user(db_session, username)\n final_username = result.username\n final_user_id = sub_type(result.id)\n else:\n token = validate_request(scope={\"user\"}, 
audience=config.get(\"BASE_URL\"))\n set_current_token(token)\n final_user_id = current_token[\"sub\"]\n final_user_id = sub_type(final_user_id)\n final_username = current_token[\"context\"][\"user\"][\"name\"]\n except Exception as exc:\n logger.info(\n f\"could not determine user auth info from request. setting anonymous user information. Details:\\n{exc}\"\n )\n\n client_id = \"\"\n try:\n if not token:\n token = validate_request(scope=[], audience=config.get(\"BASE_URL\"))\n set_current_token(token)\n client_id = current_token.get(\"azp\") or \"\"\n except Exception as exc:\n logger.info(\n f\"could not determine client auth info from request. setting anonymous client information. Details:\\n{exc}\"\n )\n\n if (\n not config.get(\"CLIENT_CREDENTIALS_ON_DOWNLOAD_ENABLED\")\n and final_username == ANONYMOUS_USERNAME\n and client_id != \"\"\n ):\n raise Forbidden(\"This endpoint does not support client credentials tokens\")\n\n return {\n \"user_id\": final_user_id,\n \"username\": final_username,\n \"client_id\": client_id,\n }", "def test_get_public_key(self):\n query_string = [('agentid', 'false'),\n ('companyid', 'false')]\n response = self.client.open(\n '/v0_9_1/PublicKeys',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def getEmailKey(self): \n return self.emailkey", "def API_KEY(self):\n return 2", "def take_auth_data():\n home = str(Path.home())\n path_to_keys = '/Documents/twitter/keys/'\n\n files = [f for f in listdir(home+path_to_keys) if '.DS' not in f]\n\n tokens = []\n for f in files:\n with open(home+path_to_keys+f, 'r') as lines:\n ln = lines.readline().replace(\" \", \"\")\n tokens.append(ln)\n\n auth_data = dict(zip(files, tokens))\n return auth_data", "def __init__(self, key=None):\n self._key = key or os.environ['HERE_API_KEY']", "def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}", "async def test_fetch_api_key(client):\n params = [('username', 'iago@zulip.com'),\n ('password', 'abcd1234')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/api/v1/fetch_api_key',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def _get_tenant_ocid(self):\n if isinstance(self._provider, oci.signer.Signer):\n return self._provider.api_key.split('/')[0]", "def connect(email, apikey):\n headers = {'Host': 'data.usajobs.gov',\n 'User-Agent': email,\n 'Authorization-Key': apikey}\n return headers", "def get_config():\n\n return {\n 'ADMIN_USERNAME': env.get('ECSTEST_ADMIN_USERNAME', 'username'),\n 'ADMIN_PASSWORD': env.get('ECSTEST_ADMIN_PASSWORD', 'password'),\n 'TOKEN': env.get('ECSTEST_TOKEN', None),\n 'CONTROL_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_ENDPOINT', 'https://127.0.0.1:4443'\n ),\n 'TOKEN_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_TOKEN_ENDPOINT', 'https://127.0.0.1:4443/login'\n ),\n 'ALT_CONTROL_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_ENDPOINT',\n env.get('ECSTEST_CONTROL_ENDPOINT',\n 'https://127.0.0.1:4443')),\n 'ALT_TOKEN_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_TOKEN_ENDPOINT',\n env.get('ECSTEST_CONTROL_TOKEN_ENDPOINT',\n 'https://127.0.0.1:4443/login'),\n ),\n 'VERIFY_SSL': _env_to_bool('ECSTEST_VERIFY_SSL', 0),\n 'REQUEST_TIMEOUT': float(env.get('ECSTEST_REQUEST_TIMEOUT', 
15.0)),\n 'TOKEN_FILENAME': env.get(\n 'ECSTEST_TOKEN_FILENAME', '/tmp/ecstest.token'\n ),\n 'CACHE_TOKEN': _env_to_bool('ECSTEST_CACHE_TOKEN', 1),\n 'AUTH_TOKEN_MIN_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MIN_LENGTH', 1),\n 'AUTH_TOKEN_MAX_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MAX_LENGTH', 512),\n 'NAMESPACE': env.get('ECSTEST_NAMESPACE', 'namespace1'),\n 'MAX_LOGIN_TIME': env.get('ECSTEST_MAX_LOGIN_TIME', 3),\n 'ACCESS_SSL': _env_to_bool('ECSTEST_ACCESS_SSL', 0),\n 'ACCESS_SERVER': env.get('ECSTEST_ACCESS_SERVER', 'localhost'),\n 'ALT_ACCESS_SERVER': env.get(\n 'ECSTEST_ALT_ACCESS_SERVER',\n env.get('ECSTEST_ACCESS_SERVER', 'localhost')\n ),\n 'ACCESS_PORT': int(env.get('ECSTEST_ACCESS_PORT', 3128)),\n 'ACCESS_KEY': env.get('ECSTEST_ACCESS_KEY', 'mykey'),\n 'ACCESS_SECRET': env.get('ECSTEST_ACCESS_SECRET', 'mysecret'),\n 'ALT_ACCESS_KEY': env.get(\n 'ECSTEST_ALT_ACCESS_KEY',\n env.get('ECSTEST_ACCESS_KEY', 'mykey')\n ),\n 'ALT_ACCESS_SECRET': env.get(\n 'ECSTEST_ALT_ACCESS_SECRET',\n env.get('ECSTEST_ACCESS_SECRET', 'mysecret')\n ),\n 'VERBOSE_OUTPUT': _env_to_bool('ECSTEST_VERBOSE_OUTPUT', 0),\n 'TEST_TARGET': env.get('ECSTEST_TEST_TARGET', constants.TARGET_AWSS3),\n 'TEST_TYPE': env.get(\n 'ECSTEST_TEST_TYPE', constants.TYPE_COMPATIBILITY\n ),\n 'DNS_BUCKET_NAMING_CONVENTION': _env_to_bool(\n 'ECSTEST_DNS_BUCKET_NAMING_CONVENTION', 0\n ),\n 'NODES_PER_SITE': int(env.get('ECSTEST_NODES_PER_SITE', 1)),\n 'RUN_DISABLED': _env_to_bool('ECSTEST_RUN_DISABLED'),\n 'REUSE_BUCKET_NAME': env.get('ECSTEST_REUSE_BUCKET_NAME'),\n }", "def _get_api_key():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('api_key')", "def GetAuthentication(email, password):\n\n url = 'https://www.google.com/accounts/ClientLogin'\n post_data = urllib.urlencode([\n ('Email', email),\n ('Passwd', password),\n ('accountType', 'HOSTED_OR_GOOGLE'),\n ('source', 'companyName-applicationName-versionID'),\n ('service', 'xapi'),\n ])\n\n request = urllib2.Request(url, post_data)\n response = urllib2.urlopen(request)\n\n content = '&'.join(response.read().split())\n query = cgi.parse_qs(content)\n auth = query['Auth'][0]\n\n response.close()\n return auth", "def get_info(self, sha256):\n url = self.API_URL % ('apks/', sha256, '')\n return requests.get(url, headers=self.headers, proxies=self.proxies, verify=self.verify_ssl)", "def get_external_oidc():\n\n unexpired_only = flask.request.args.get(\"unexpired\", \"false\").lower() == \"true\"\n\n global external_oidc_cache\n if not external_oidc_cache:\n data = {\n \"providers\": [\n {\n # name to display on the login button\n \"name\": idp_conf[\"name\"],\n # unique ID of the configured identity provider\n \"idp\": idp,\n # hostname URL - gen3fuse uses it to get the manifests\n \"base_url\": oidc_conf[\"base_url\"],\n # authorization URL to use for logging in\n \"urls\": [\n {\n \"name\": idp_conf[\"name\"],\n \"url\": generate_authorization_url(idp),\n }\n ],\n }\n for oidc_conf in get_config_var(\"EXTERNAL_OIDC\", [])\n for idp, idp_conf in oidc_conf.get(\"login_options\", {}).items()\n ]\n }\n external_oidc_cache = data\n\n # get the username of the current logged in user.\n # `current_user` validates the token and relies on `OIDC_ISSUER`\n # to know the issuer\n client = get_oauth_client(idp=\"default\")\n flask.current_app.config[\"OIDC_ISSUER\"] = client.metadata[\"api_base_url\"].strip(\"/\")\n username = None\n try:\n user = current_user\n username = user.username\n except Exception:\n flask.current_app.logger.info(\n \"no logged in user: will 
return refresh_token_expiration=None for all IdPs\"\n )\n\n # get all expirations at once (1 DB query)\n idp_to_token_exp = get_refresh_token_expirations(\n username, [p[\"idp\"] for p in external_oidc_cache[\"providers\"]]\n )\n\n result = {\"providers\": []}\n for p in external_oidc_cache[\"providers\"]:\n # expiration of the current user's refresh token\n exp = idp_to_token_exp[p[\"idp\"]]\n if exp or not unexpired_only:\n p[\"refresh_token_expiration\"] = exp\n result[\"providers\"].append(p)\n\n return flask.jsonify(result), 200", "def test_api_key(self):\n self.assertEqual(self.route4me.key, '11111111111111111111111111111111')", "def load_apikey():\n print \"Loading API keys\"\n fullpath = os.getenv(\"HOME\")\n try:\n keyfile = open(fullpath + '/.twitter.key', 'r')\n except: \n sys.exit(\"** ERROR ** \\n> Key file not found. Please check ~/.twitter.key\")\n\n for line in keyfile:\n # This is fucking ugly\n if re.match(\"ConsumerKey\", line):\n ConsumerKey = line\n if re.match(\"ConsumerSecret\", line):\n ConsumerSecret = line\n if re.match(\"AccessTokenKey\", line):\n AccessTokenKey = line\n if re.match(\"AccessTokenSecret\", line):\n AccessTokenSecret = line\n\n keyfile.close()\n return ConsumerKey, ConsumerSecret, AccessTokenKey, AccessTokenSecret", "def __init__(self, key):\r\n self._key = key\r\n self._authenticated = Deferred()", "def extractCredentials( self, request ):\n #log( 'extractCredentials')\n\n creds = {}\n session = request.SESSION\n username = None\n\n tokenTool = getToolByName(self, 'onetimetoken_storage')\n\n ob = session.get(self.session_var)\n if ob is not None and isinstance(ob, UsernameStorage):\n username = ob._getUsername()\n #log( \"session username: %s\" % username )\n \n if username is None: \n loginCode = request.get('logincode')\n\n if not loginCode:\n return None # not authenticated\n\n try:\n username = tokenTool.verifyToken(loginCode)\n except:\n log( \"Error, token tool refused token: %s\" % sys.exc_info()[0] )\n\n if not username:\n return None # not authenticated\n\n #log( \"token username: %s\" % username )\n\n userstorage = UsernameStorage()\n userstorage._setUsername(username)\n session[self.session_var] = userstorage\n\n creds['remote_host'] = request.get('REMOTE_HOST', '')\n try:\n creds['remote_address'] = request.getClientAddr()\n except AttributeError:\n creds['remote_address'] = request.get('REMOTE_ADDR', '')\n\n\n creds['login'] = username\n\n # log( \"returning username: %s\" % username )\n\n return creds", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def read_key():\n path = os.path.join(os.path.dirname(__file__), 'data')\n f = open(os.path.join(path, 'credential.txt'), 'r')\n key = f.read()\n f.close()\n return key", "def generate_access_key(self):\n\t\tfrom app import app\n\t\ts = JSONWebSignatureSerializer(app.config['SECRET_KEY'])\n\t\taccess_key = s.dumps({'username': self.username}) \n\t\tself.access_key = access_key", "def inner():\n hoststrings = []\n if env.key_filename == None: env.key_filename = []\n for host in host_dicts:\n hostname = host.get('hostname', '')\n user = host.get('user', '')\n port = host.get('port', '')\n hoststring = '%s%s%s' % (user and user + '@',\n hostname,\n port and ':' + str(port),\n )\n hoststrings.append(hoststring)\n key_filename = host.get('key_filename')\n if key_filename:\n env.key_filename.append(key_filename)\n env.hosts = hoststrings", "def api_key(self):\n return self._api_key" ]
[ "0.61862534", "0.6072772", "0.59618855", "0.58222324", "0.57261264", "0.5712428", "0.57017297", "0.5646959", "0.56169933", "0.55943763", "0.5513818", "0.5508162", "0.5504784", "0.5502838", "0.54992354", "0.5479007", "0.54416525", "0.5434418", "0.5419267", "0.5413086", "0.5412248", "0.54094154", "0.5400884", "0.5394506", "0.5362085", "0.53573483", "0.5341847", "0.53249246", "0.53070885", "0.5293761", "0.52652895", "0.52579653", "0.5257637", "0.5242142", "0.52420586", "0.5236164", "0.5235156", "0.5230933", "0.5229207", "0.5224266", "0.52184117", "0.52145344", "0.5201766", "0.51999104", "0.51978904", "0.5196146", "0.51902044", "0.51836425", "0.51676327", "0.5164897", "0.51635945", "0.51576114", "0.5149061", "0.5145525", "0.51305825", "0.5127123", "0.5127123", "0.5117749", "0.51161754", "0.5106346", "0.51038295", "0.5091888", "0.5090443", "0.5087591", "0.5085387", "0.5078118", "0.5077141", "0.5069922", "0.50666845", "0.50656813", "0.5065029", "0.5063497", "0.5061103", "0.5057027", "0.50507647", "0.5043898", "0.5043138", "0.50398487", "0.50382805", "0.5031237", "0.50281245", "0.50272644", "0.50160664", "0.50119126", "0.50085324", "0.5004326", "0.5004304", "0.5003798", "0.5003209", "0.5002434", "0.49928123", "0.49886635", "0.49875498", "0.49829873", "0.4981598", "0.49810845", "0.4976654", "0.49748778", "0.49745902", "0.49736226" ]
0.55181515
10
List of contact information
def contact_info(self): return [ { 'contact_info': c.get('contactInfo'), 'type': c.get('type'), 'primary': c.get('primary'), 'verified': c.get('verified'), } for c in self.entity_payload.get('contactInfo')]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def list_contacts(self):\n return self.contacts", "def list_contact(name):\n db = get_db()\n name = hashlib.sha256(name).hexdigest()\n \n if name in db:\n info = db[name]\n print logger.ok(\"\"\"\n Contact Information:\n Name: %s\n Phone Number: %s\n Email Address: %s\n \"\"\" % (info['name'], info['phone'], info['email']))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def contact_list(self):\n return self._contact_list", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df", "def show_contacts():\n data_list = queries2.contacts()[0]\n table_titles = queries2.contacts()[1]\n title = \"Contacts\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_info(self):\n return self._contact_info", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def get_contacts(self):\n contacts 
= Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def receiveContactList(self, contactList):", "def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")", "def contacts():\n return render_template(\n \"contacts.html\",\n title = \"Contacts\")", "def get_contacts():\n return jsonify(g.driver.get_contacts())", "def contact_list(request):\n if request.method == 'GET':\n contact = Contact.objects.all()\n serializer = ContactSerializer(contact, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = ContactSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def contacts(self):\n return ContactCollection(self.request)", "def contacts(self):\r\n return contacts.Contacts(self)", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def contact(request):\n\n contacts = ContactDetails.objects\n return render(request, 'contact_app/contact.html', {\"contacts\":contacts})", "def _parse_contact_information(self):\n left_column = self.content.find(\"div\", class_=\"linkeSpalte40\")\n graubox = left_column.find(\n lambda tag: tag.name == \"div\" and tag[\"class\"] == [\"grauBox\"]\n )\n\n emails_raw = graubox.find_all(\"a\", class_=\"mail\")\n websites_raw = graubox.find_all(\"a\", 
class_=\"noDecoration\")\n telephone_raw = graubox.find_all(\"span\", class_=\"telefonnummer\")\n address_raw = [\n e.nextSibling for e in graubox.find_all(\"em\") if e.text == \"Anschrift:\"\n ]\n\n address = address_raw[0].li.get_text(\"\\n\") if address_raw else None\n emails = [re.sub(r\"^mailto:\", \"\", e.attrs[\"href\"]) for e in emails_raw]\n phone_numbers = [t.text for t in telephone_raw]\n websites = [w.attrs[\"href\"] for w in websites_raw]\n\n return {\n \"address\": address,\n \"emails\": emails,\n \"phone_numbers\": phone_numbers,\n \"websites\": websites,\n }", "def contact_details(self):\n return self.data.get(\"contactDetails\")", "def get_contact_info(self):\n outputDict = {\"USERNAME\": consts.USERNAME,\n \"IP\": consts.IPADDRESS, \n \"MACHINE\": consts.HOSTNAME, \n \"EMAIL\": 'ckenne24@student.scad.edu', \n \"PHONE\": '203-722-6620'} # ::: TO DO::: dynamically get phone and email info automatically\n return outputDict", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)", "def present_data(self, data=None):\n print('--------------------------------------------------------------------------')\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n 'index',\n 'name',\n 'surname',\n 'email',\n 'phone'\n )\n )\n print('--------------------------------------------------------------------------')\n\n data = data if data else self.contacts\n for contact in data:\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n contact[0],\n contact[1],\n contact[2],\n contact[3],\n contact[4]\n )\n )", "def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts", "def pull_one_contact(self, name):\n contact = []\n for x in self.contacts:\n if x[0] == name:\n contact_name = x[0]\n number = x[1]\n email = x[2]\n zipcode = x[3]\n contact = [contact_name, number, email, zipcode]\n print(contact)\n return contact, self.contacts.index(x)", "def get_contact_info(self):\n return f\"Contact {self} at {self.email}\"", "def get_queryset(self):\n return self.request.user.contacts.all()", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def get_info(self,who=None):\n alluri = []\n if who == None:\n return self.get_personal_info()\n\n if type(who) is not list:\n alluri.append(who) \n else:\n alluri = who\n \n self.get(\"INFO\",\"GetContactsInfo\",alluri)\n response = self.send()\n return response", "def get_contact_info(self, html_content: str) -> object:\n if not html_content:\n raise Exception(\"HTML content not found\")\n\n soup = BeautifulSoup(html_content, 'html.parser')\n\n 
self.contact = {}\n cards = soup.select(self.tags.get(\"contact.panels\"))\n\n # read cards panels for cotnact info\n for card in cards:\n form = card.parent.select_one(\"form\")\n\n # if is form of user information\n if form:\n rows = form.select(self.tags.get(\"contact.form.row\"))\n for row in rows:\n label = row.select_one(self.tags.get(\"contact.form.row.label\")).get_text(strip=True)\n value = row.select_one(self.tags.get(\"contact.form.row.value\")).get_text(strip=True)\n\n if label == \"User ID\":\n self.contact[\"account\"] = value\n\n elif label == \"Name\":\n self.contact[\"full_name\"] = value\n\n elif label == \"Email\":\n self.contact[\"email\"] = value\n\n else:\n lis = card.parent.select(\"li\")\n for li in lis:\n label = li.select_one(\"label\").get_text(strip=True)\n if label == \"Address\":\n street1 = get_value(li.select_one(self.tags.get(\"contact.address.street1\"))).strip()\n street2 = get_value(li.select_one(self.tags.get(\"contact.address.street2\"))).strip()\n state = get_value(li.select_one(self.tags.get(\"contact.address.state\"))).strip()\n postalcode = get_value(li.select_one(self.tags.get(\"contact.address.zip\"))).strip()\n\n self.contact[\"address_line1\"] = street1\n self.contact[\"address_line2\"] = street2\n self.contact[\"address_state\"] = letters_only(state.strip())\n self.contact[\"address_postal_code\"] = postalcode\n\n elif label in [\"Phone\", \"Time Zone\"]:\n\n key = \"phone_number\" if label == \"Phone\" else \"timezone\"\n self.contact[key] = li.select_one(self.tags.get(\"contact.phone\")).get_text(strip=True).strip()\n\n return self.contact", "async def get(self):\n await self.handle_request(self.contacts_new_api, 1)", "def contact_information(self) -> ContactInformation:\n return self._contact_information", "def Run(self):\n return self.ListAllContacts()", "def index(self):\n contact = Contacts.query.order_by(desc(Contacts.Created)).first_or_404()\n content = jsonify({\n \"contacts\": [{\n \"id\": contact.ContactsID,\n \"email\": contact.Email,\n \"techRider\": contact.TechRider,\n \"inputList\": contact.InputList,\n \"backline\": contact.Backline,\n \"createdAt\": contact.Created,\n \"updatedAt\": contact.Updated,\n }]\n })\n\n return make_response(content, 200)", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def test_display_all_contact(self):\n self.assertEqual(Contact.display_contacts(), Contact.contact_list)", "def contact_list(self, contact_list):\n \n self._contact_list = contact_list", "def get_contacts(filename):\n \n \n emails = []\n with open(filename, mode='r') as contacts_file:\n for a_contact in contacts_file:\n\n emails.append(a_contact.split()[0])\n return emails", "def search_contact():\n if request.method == 'GET':\n tel = request.args.get('tel')\n contact = io_client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n return jsonify({\"existe\": \"Si\"}), 201\n return jsonify({\"existe\": \"No\"}), 404", "def get_description(self):\n return self['contact_name']", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: 
self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def print_all(self):\n with open(self.file, 'r', encoding='utf-8') as self.contacts_file:\n for i in self.contacts_file.readlines():\n print(i)", "def show_user_contacts(user_id):\n\n user_contacts = Contact.query.filter_by(user_id=user_id).all()\n\n contacts = []\n for contact in user_contacts:\n contacts.append( { 'contact_id': contact.contact_id,\n 'first_name': contact.first_name,\n 'last_name': contact.last_name,\n 'email': contact.email } )\n\n return jsonify(contacts)", "def test_get_contact(self):\n pass", "def getFullInformation(self):\n request = requests.get(self.url, headers=REQUEST_HEADERS)\n if request.status_code == 200:\n # Got a valid response\n souped = BeautifulSoup(request.text, \"html5lib\")\n description = souped.find(\"div\", id=\"vip-description-text\").string\n if description:\n self._description = description.strip()\n else:\n self._description = \"\"\n contact = souped.find(class_=\"phone\")\n if not contact:\n self._contact_name, self._contact_number = [\"\",\"\"]\n else:\n if \" on \" in contact.string:\n self._contact_name, self._contact_number = contact.string.split(\" on \")\n else:\n self._contact_name, self._contact_number = [\"\", contact.string]\n\n gmaps_link = souped.find(\"a\", class_=\"open_map\")\n if gmaps_link:\n self._latitude, self._longitude = re.search(\"center=(-?\\w.*),(-?\\d.*)&sensor\", gmaps_link.get(\"data-target\")).groups()\n else:\n self._latitude, self._longitude = [\"\", \"\"]\n\n return\n else:\n # TODO: Add error handling\n print (\"Server returned code: \" + request.status_code + \" for \" + url)\n return []", "def get_contact(self, response: Response) -> dict:\n contact = {'email': '', 'phone': '', 'website': response.url, 'meet': ''}\n contact['email'] = response.xpath(\"//a[@class='email-tech']/@href\").get().split(\":\")[-1]\n return contact", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def test_get_contact_objects(self):\n\n contacts = MessageController.get_contact_objects(['2'])\n self.assertEqual(contacts[0].contact_first_name, 'Contact2')\n self.assertEqual(contacts[0].contact_phone, '4153417706')\n self.assertEqual(contacts[0].user_id, 1)\n self.assertEqual(contacts[0].lang_id, 1)", "def read_phone_contacts(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT c.personid, c.contactid, p.typeid, p.sequenceno, p.areacode, p.exchange, p.trunk, \"\n \"co.typecode, co.typedescription \"\n \"FROM contact AS c \"\n \"JOIN phone AS p ON c.contactid = p.contactid \"\n \"JOIN codes co on co.typeid = p.typeid \"\n \"WHERE c.personid = ? 
ORDER BY p.sequenceno ASC;\", (person_id,))\n\n phone_list = []\n for row in c:\n _phone = Phone()\n _phone.person_id = row[\"personid\"]\n _phone.contact_id = row[\"contactid\"]\n _phone.phone_type_id = row[\"typeid\"]\n _phone.sequence_number = row[\"sequenceno\"]\n _phone.area_code = row[\"areacode\"]\n _phone.exchange = row[\"exchange\"]\n _phone.trunk = row[\"trunk\"]\n _phone.type_code = row[\"typecode\"]\n _phone.phone_type_id = row[\"typeid\"]\n _phone.type_description = row[\"typedescription\"]\n phone_list.append(_phone)\n conn.close()\n return phone_list\n except:\n return []", "def get_contacts(userid):\n return 'get contacts - ' + userid", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n return contact_data", "def test_get_contacts(self):\n pass", "def getallcontacts(self):\n feed_url = self.contacts_client.GetFeedUri(projection='full')\n total_read = 0\n while True:\n print('Retrieving contacts... (%d retrieved so far)' % total_read)\n feed = self.contacts_client.get_feed(uri=feed_url,\n auth_token=None,\n desired_class=gdata.contacts.data.ContactsFeed)\n total_read += len(feed.entry)\n for entry in feed.entry:\n yield entry\n next_link = feed.GetNextLink()\n if next_link is None:\n print('All contacts retrieved: %d total' % total_read)\n break\n feed_url = next_link.href", "def getContactsData(service, groupResourceName, maxMembers):\n # get the ids of the contacts inside the specified group\n contactsIDs = service.contactGroups().get(\n resourceName=groupResourceName, \n maxMembers=maxMembers).execute()[\"memberResourceNames\"]\n\n # get data of the contacts that correspond to the ids obtained\n contactsData = service.people().getBatchGet(\n resourceNames=contactsIDs,\n personFields='names,emailAddresses').execute()[\"responses\"]\n\n # extract the names and the emailAddresses of the contacts\n namessList = [] \n mailsList = []\n for contact in contactsData:\n try:\n namessList.append(contact[\"person\"][\"names\"][0][\"displayName\"])\n except:\n raise Exception(\"All contacts must have a name associated\")\n mailsList.append(contact[\"person\"][\"emailAddresses\"][0][\"value\"])\n return namessList, mailsList", "def list_contacts(self, prefix):\n sub_trie = self.find(prefix.lower())\n _crawl_trie(sub_trie, prefix)", "def get_contacts(self):\n feet = [\"REAR_RIGHT_FOOT\", \"REAR_LEFT_FOOT\",\n \"FRONT_RIGHT_FOOT\", \"FRONT_LEFT_FOOT\"]\n contacts = np.zeros(4, dtype=np.float32)\n for i, foot in enumerate(feet):\n if self.supervisor.getFromDef(foot).getNumberOfContactPoints() > 0:\n contacts[i] = 1.0\n return contacts", "def do_show(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(\"Contacts in the current book\\n\")\n\t\t\tself.cl.list_contacts()\n\t\telse:\n\t\t\tprint(\"To see contacts you need to open or create book\")", "def simple_contacts(filename):\n\n try:\n file_path = open(filename, 'r', encoding='utf-8')\n\n except FileNotFoundError:\n pretty_print(\"Cannot open contacts.txt\", \":\")\n sleep(3)\n\n else:\n with file_path:\n print_list = []\n email_dict = {}\n for line in file_path:\n split_line = line.strip().split('|')\n\n if split_line[0].isnumeric():\n\n command = int(split_line[0])\n email = split_line[-1]\n print_list.append(split_line)\n email_dict[command] = email\n\n return print_list, email_dict", "def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = 
pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"", "def get_contacts(request, company_id):\n try:\n company = Company.objects.get(pk=company_id)\n\n if not (request.user.company_id == int(company_id) or request.user.is_admin == True):\n raise Exception(\"Fobiden: requesting user doesn't have permission to specified Company.\")\n\n contacts = []\n for contact in Contact.objects.filter(company=company):\n contacts.append(contact.dump_to_dict())\n\n return format_ajax_response(True, \"Company contacts listing retrieved successfully.\", {'contacts': contacts})\n except Exception as ex:\n logger.error(\"Failed to get_contacts: %s\" % ex)\n return format_ajax_response(False, \"There was a problem retrieving company contacts listing.\")", "def __init__(self, contact_detail):\n\t\tself.first_name = contact_detail['First Name'].strip()\n\t\tself.last_name = contact_detail['Last Name'].strip()\n\t\tself.mobile = contact_detail['Mobile Phone'].strip()\n\t\tself.email = contact_detail['E-mail Address'].strip()", "def search_contact(request, **kwargs):\n limit = int(request.GET.get('limit', constants.DEFAULT_LIMIT))\n offset = int(request.GET.get('offset', constants.DEFAULT_OFFSET))\n search_term = request.GET.get('search_term')\n contact = private.Contact()\n data = contact.fetch_list(limit, offset, search_term)\n return JsonResponse({'objects': data})", "def first_contact(self) -> List[str]:\n error_list = []\n return error_list", "def showEditContact(self):", "def contact(self):\n return self._contact", "def contact(self):\n return self._contact", "def __init__(self, first_name=\" \", last_name=\" \", phone_number=0, phone_number_type=\" \", contact_list=[]):\n self.first_name = first_name\n self.last_name = last_name\n self.phone_number = phone_number\n self.phone_number_type = phone_number_type\n self.valid_phone_number_types = [\"home\", \"office\", \"cell\"]\n self.contact_list = contact_list", "def create_contacts_list(self, contactsfile, templatefile='templates/contacts_list_template.tex', encoding='utf-8'):\n\n self.tex = \"\"\n\n with open(templatefile, 'r', encoding=encoding) as f:\n template = f.read().split(\"<+CONTACTS+>\")\n\n\n first_table = True\n\n with open(contactsfile, 'r', encoding=encoding) as c:\n lines = c.readlines()\n\n for line in lines:\n line = line.strip()\n\n if len(line) > 0:\n if line[0] == '#' and line[1] != '#':\n # Line is a heading.\n\n # We should end the previous table, if any:\n if not first_table:\n self.tex += \"\\n\\\\end{longtable}\\\\vspace*{1em}\\n\"\n\n self.tex += \"{{\\Large\\\\bfseries {heading}}}\\n\".format(heading=line.strip(\"# \"))\n\n elif line[0] == '#' and line[1] == '#':\n # Line specifies column headers.\n first_table = False\n\n split_line = 
line.strip('# ').split(';')\n n_cols = len(split_line)\n\n self.tex += \"\\\\begin{{longtable}}{{*{{{n}}}{{l}}}}\\n\".format(n=n_cols)\n\n headers = \"\"\n for i,word in enumerate(split_line):\n if i == 0:\n headers += \"\\\\textbf{{{word}}}\".format(word=word.strip())\n else:\n headers += \" & \\\\textbf{{{word}}}\".format(word=word.strip())\n headers += \"\\\\\\\\\\n\"\n self.tex += r\"\"\"\n\\toprule\n{headers}\n\\midrule\n\\endfirsthead\n\n\\toprule\n{headers}\n\\midrule\n\\endhead\n\n\\bottomrule\n\\endfoot\n\"\"\".format(headers=headers)\n\n else:\n # Line contains contact information:\n split_line = line.strip().split(';')\n if len(split_line) != n_cols:\n print(\"Warning! Line does not have the right number of columns! Line: {}\".format(line))\n\n for i,word in enumerate(split_line):\n if i == 0:\n self.tex += \"{word}\".format(word=word.strip())\n else:\n self.tex += \" & {word}\".format(word=word.strip())\n\n self.tex += \"\\\\\\\\\\n\"\n\n self.tex += \"\\\\end{longtable}\"\n template.insert(1,self.tex)\n self.tex = \"\\n\".join(template)", "def contact(request):\n assert isinstance(request, HttpRequest)\n contact = models.ContactUs.objects.all()\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact Us',\n 'message':'Our contact information:',\n 'year':datetime.now().year,\n 'contact': contact\n }\n )", "def read_email_contacts(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n\n c.execute(\"SELECT c.personid, c.contactid, e.emailaddress, e.sequenceno, \"\n \"co.typeid, co.typecode, co.typedescription \"\n \"FROM contact AS c \"\n \"JOIN email AS e ON e.contactid = c.contactid \"\n \"JOIN codes co on co.typeid = e.typeid WHERE c.personid =? 
ORDER BY e.sequenceno ASC;\", (person_id,))\n\n email_list = []\n for row in c:\n _email = Email()\n _email.person_id = row[\"personid\"]\n _email.contact_id = row[\"contactid\"]\n _email.email_address = row[\"emailaddress\"]\n _email.sequence_number = row[\"sequenceno\"]\n _email.email_type_id = row[\"typeid\"]\n _email.type_code = row[\"typecode\"]\n _email.type_description = row[\"typedescription\"]\n email_list.append(_email)\n conn.close()\n return email_list\n except:\n return []", "def test_display_all_contacts(self):\n self.assertEqual(Contact.display_all_contacts(), Contact.contact_list)", "def _get_contacts(self, tgt):\n with open(tgt, mode='r', encoding='utf-8') as f:\n str_contents = f.read()\n self.contacts = json.loads(str_contents)\n return", "def contact():\n return dict(\n title='Contact',\n message='Your contact page.',\n year=datetime.now().year\n )", "def all_in_contact(cls, contact_id: int):\n for contact_tag in cls.get_all_in(\"contacts\", contact_id):\n yield contact_tag", "def get(self, set=''):\n params = {}\n if set: params['set'] = set\n\n request = self._connection.get('contacts.json', params=params)\n if request.status_code != 200:\n raise Exception('status code {0}: cannot get contacts'.format(request.status_code))\n return [User.parse(self._connection, each) for each in request.json()]", "def append_contacts(self, lines, lang):\n if lang==\"en\":\n lines.append(\"section Contacts\")\n elif lang==\"it\":\n lines.append(\"section Contatti\")\n lines.append(\"mailto://%s e-mail\" % flags['MAIL'])\n lines.append(\"verbatim %s\" % SKYPE)\n lines.append(\"verbatim &nbsp;\")\n return lines", "def test_get_contact_lists(self):\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertEqual(len(content), self.contact_lists_count)", "def get_contacts():\n # Parse command line options\n try:\n opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'password='])\n except getopt.error, msg:\n print 'python contacts_example.py --user [username] --password [password]'\n sys.exit(2)\n user = ''\n password = ''\n # Process options\n for option, arg in opts:\n if option == '--user':\n user = arg\n elif option == '--password':\n password = arg\n\n while not user:\n print 'NOTE: Please run these tests only with a test account.'\n user = raw_input('Please enter your username: ')\n while not password:\n password = getpass.getpass()\n if not password:\n print 'Password cannot be blank.'\n try:\n contacts = GoogleContacts(user, password)\n except gdata.client.BadAuthentication:\n print 'Invalid user credentials given.'\n exit(1)\n contacts_list = contacts.Run()\n return contacts_list", "def _get_receivers_list(self):\n\n # TODO: document what this plugin expects to be in Dockerfile/where it gets info from\n global_component = self._get_component_label()\n # this relies on bump_release plugin configuring source.git_commit to actually be\n # branch name, not a commit\n if not isinstance(self.workflow.source, GitSource):\n raise PluginFailedException('Source is not of type \"GitSource\", panic!')\n git_branch = self.workflow.source.git_commit\n try:\n r = requests.get(urljoin(self.pdc_url, 
'rest_api/v1/release-component-contacts/'),\n headers={'Authorization': 'Token %s' % self._get_pdc_token()},\n params={'global_component': global_component,\n 'dist_git_branch': git_branch,\n 'role': self.pdc_contact_role},\n verify=self.pdc_verify_cert)\n except requests.RequestException as e:\n self.log.error('failed to connect to PDC: %s', str(e))\n raise RuntimeError(e)\n\n if r.status_code != 200:\n self.log.error('PDC returned status code %s, full response: %s',\n r.status_code, r.text)\n raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %\n r.status_code)\n\n contacts = r.json()\n\n if contacts['count'] == 0:\n self.log.error('no %s role for the component', self.pdc_contact_role)\n raise RuntimeError('no %s role for the component' % self.pdc_contact_role)\n\n send_to = []\n for contact in contacts['results']:\n send_to.append(contact['contact']['email'])\n\n return send_to", "def read_contacts(contacts_file, uidlist, used_uid_names):\n # Read in out values to a list\n lines = read_contacts_to_list(contacts_file)\n\n # Convert our list to dict\n contactsdict = contacts_from_list_to_dict(lines, uidlist, used_uid_names)\n\n #logging.debug(\"Aliasdict: %s\" % aliasdict)\n return contactsdict", "def get_queryset(self):\n user = self.request.user\n return Contact.objects.filter(owner=user)", "def db_show_all():\n the_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for key in db:\n person = Person()\n person.name = key\n person.phone = db[key]\n the_list.append(person)\n display_list(the_list)\n db.close()", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def extract_contacts(filename):\n # This list returns the final data\n names = []\n\n # open file\n f = open(filename, 'rU')\n text = f.read()\n\n # extract site name\n match_site = re.search(r'Property:\\s*(\\w+)', text)\n if not match_site:\n sys.stderr.write('No site match')\n sys.exit(1)\n site = match_site.group(1)\n names.append(site)\n\n # extract names\n # name_match = re.findall(r'p\\w+,,\"(\\w+,\\s\\w+)\"[\\.\\w*\\s*,()-]+(\\d+/\\d+/\\d+)[,]+(\\d+/\\d+/\\d+)[,]+(\\d+/\\d+/\\d+)', text)\n tuples = re.findall(r't\\d+[,]+([*]*\\w+\\s*\\w+)[\\.\\w*\\s*\",()-]+(\\d+/\\d+/\\d+)[,]+(\\d+/\\d+/\\d+)[,]+(\\d+/\\d+/\\d+)', text)\n if not tuples:\n sys.stderr.write(\"No names matches\")\n sys.exit(1)\n # store data in a dictionary of names\n names_dict = {}\n for name, lease_begin, lease_end, move_in in tuples:\n if name not in names_dict:\n names_dict[name] = [lease_begin, lease_end, move_in]\n for k in sorted(names_dict.keys()):\n names.append(k + ',' + names_dict[k][0] + ',' + names_dict[k][1] + ',' + names_dict[k][2])\n return names", "def get_contact_interactions(request, pk):\n try:\n contact = Contact.objects.get(pk=pk)\n except Contact.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ans = []\n interactions = contact.interaction_set.all()\n for interaction in interactions:\n ans.append(InteractionSerializer(interaction).data)\n return Response(ans)" ]
[ "0.78861237", "0.76341623", "0.758478", "0.75249934", "0.75059426", "0.7496882", "0.7042957", "0.70327526", "0.7013183", "0.7013183", "0.7013183", "0.69496447", "0.69449735", "0.69267607", "0.69231725", "0.6869843", "0.6860492", "0.6860492", "0.6857307", "0.6849241", "0.68165183", "0.6810057", "0.6795768", "0.6778526", "0.67731774", "0.6767155", "0.6766361", "0.67116624", "0.67038435", "0.66642797", "0.66176784", "0.6569828", "0.65056694", "0.64778817", "0.6466393", "0.6426784", "0.64231014", "0.63976514", "0.6361572", "0.63555604", "0.6347617", "0.6327037", "0.627793", "0.6267475", "0.6241512", "0.61969644", "0.61802155", "0.61597097", "0.6097499", "0.6067127", "0.6044159", "0.60428756", "0.6042528", "0.6036226", "0.6033711", "0.60246205", "0.602323", "0.6019983", "0.6012464", "0.6007957", "0.5997714", "0.59959203", "0.5976885", "0.59710836", "0.5968355", "0.5936337", "0.5925819", "0.5907065", "0.58908856", "0.58769476", "0.5874677", "0.58736193", "0.5873566", "0.5851846", "0.5851463", "0.58452946", "0.584098", "0.5836183", "0.58337325", "0.58337325", "0.583071", "0.5829123", "0.5806867", "0.5798989", "0.5798175", "0.57934785", "0.57913846", "0.57805854", "0.5777584", "0.57767683", "0.5776302", "0.5774586", "0.5755136", "0.57474035", "0.57128704", "0.5710869", "0.5689369", "0.5689369", "0.56849855", "0.5669137" ]
0.7561425
3
Create a formatted recommendation URI based on user id.
def resolve_worker_evaluation_url(request, user): return request.build_absolute_uri(reverse('hirer:evaluate', args=[user.id]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_user_link(user):\n return '[@{0}](https://github.com/{0})'.format(user)", "def create_qr_uri(user: User) -> dict:\n otp_secret = pyotp.random_base32()\n qr_uri = pyotp.totp.TOTP(otp_secret).provisioning_uri(\n name=user.username, issuer_name=f\"{APPNAME} ({MAIN_VERSION_NAME})\"\n )\n user.otp_secret = otp_secret\n user.save()\n return {\n 'qr_uri': qr_uri,\n 'otp_secret': otp_secret,\n 'msg': ('Two-factor authentication is obligatory on this server. '\n 'Please visualize the QR code to set up authentication.')\n }", "def recomenadation_user_based(user_id):\n try:\n user_id = int(user_id)\n return str( user_based( rec_matrix, user_item, user_id ) )\n except AssertionError as ae:\n # if index + 1 not in df.userid.value_counts().index:\n # return {\"sort_ids\": [int(item) for item in df.item_id.value_counts().index[:3]]}\n return str(ae)\n except Exception as e:\n traceback.print_exc()\n return \"0\"", "def _format_api_url(self, url):\n user_name = self._get_user_name()\n # format and return url\n return url.format(\n user_name = user_name,\n element = urllib.quote(self.qnet_element.encode('utf-8'), safe=''),\n token = self._md5(\"%s:%s:%s\" % (user_name, self.iteration_id, self._secret_key))\n )", "def remake_user_url(self, share_url):\n signature_list = [\"ToWZhwAALto9c5Po75QH1k6FmZ\", \"OCp12QAAWHxL3H-2aPAunDgqdc\", \"U6o86AAAM.ogXDaHGe4txlOqPP\",\n \"P0BGzAAAXyNMtkyjOHBM2z9ARt\"]\n text_head = \"https://www.iesdouyin.com/web/api/v2/aweme/post/?\"\n text_mid = \"count=21&\"\n text_cursor = \"max_cursor={}\".format(self.cursor)\n text_sign = random.choice(signature_list)\n text_aid_sign = \"&aid=1128&_signature=\" + text_sign + \"&dytk=\"\n text_sec_num = get_sec_id(share_url)\n remake = text_head + text_sec_num + text_mid + text_cursor + text_aid_sign\n return remake", "def _assemble_id_url(self, award_id):\n award_id_api = 'http://api.nsf.gov/services/v1/awards/{}.xml?'\\\n .format(award_id)\n search_params = self._build_param_request()\n include = self._build_field_request()\n request_url = award_id_api + include + search_params\n return request_url", "def generate_uri(uri):\n return uri[:-5] + uuid.uuid4().hex", "def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)s@somewebsite.com\">%(user)s</a>' % {\"user\": user}", "def recommendation_item_based(user_id, item_id):\n try:\n user_id = int(user_id)\n item_id = int(item_id)\n return str( item_based( user_item, user_id, item_id) )\n except AssertionError as ae:\n return str(ae)\n except Exception as e:\n traceback.print_exc()\n return \"0\"", "def get_url_from_id(doc_id):\n return f\"{BASE_URL}/view/{doc_id}\"", "def make_receipt_url(*, base_url, readable_id):\n dashboard_url = urljoin(base_url, reverse(\"user-dashboard\"))\n return f\"{dashboard_url}?status=purchased&purchased={quote_plus(readable_id)}\"", "def get_uri_for_user(self, target_user):\r\n users = self.get_json(USER_LIST_URI)[\"results\"]\r\n for user in users:\r\n if user[\"id\"] == target_user.id:\r\n return user[\"url\"]\r\n self.fail()", "def userDocumentId(self, id: str) -> str:", "def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n params = {\n 'offset': offset,\n 'limit': limit,\n 'sort': sort\n }\n query = urllib.urlencode(params)\n return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)", "def _get_base_url(self):\n\n # This should have been established by _logon\n assert self.__userid\n\n return \"/users/%s\" % 
self.__userid", "def create_uri(self, chunk, document = False):\n # first we create the the base uri\n if self.options.get(\"prefix\"):\n prefix = self.options.get(\"prefix\")\n else:\n prefix = gethostname() + \"#\"\n\n word = chunk.split(\"/\")[0] if not document else chunk\n\n ## calculate the index of the current chunk\n # find all indices in the text\n indices = [m.start() for m in re.finditer(re.escape(word), self.text)]\n\n # indices could be None because of - I think - a bug in MontyLingua\n # which tags me/you as me/PRP :/: you/PRP and so the colon can't be\n # found in the orginal text. Because the slash is the only known\n # case of this bug, we simply replace the colon\n if not indices:\n indices = [m.start() for m in re.finditer(\"/\", self.text)]\n\n if len(indices) > 1:\n try:\n # get current position\n index = indices[self.positions[word]]\n except KeyError:\n # the word is not saved yet\n index = indices[0]\n self.positions[word] = 0\n # increase current position\n self.positions[word] += 1\n else:\n index = indices[0]\n \n # now create the unique identifier\n if self.options.get(\"urirecipe\") == \"offset\":\n uri = \"offset_\"\n uri += str(index) + \"_\"\n uri += str(index + len(word)) + \"_\"\n elif self.options.get(\"urirecipe\") == \"context-hash\":\n con_len = self.options.get(\"context-length\")\n uri = \"hash_\"\n uri += str(con_len) + \"_\"\n uri += str(len(word)) + \"_\"\n context = self.text[max(0,index - con_len):index]\n context += \"(\" + word + \")\"\n context += self.text[index+len(word):min(len(self.text),index+len(word) + con_len)]\n uri += hashlib.md5(context).hexdigest() + \"_\"\n uri += word[:20]\n\n return prefix + urllib.quote(uri)", "def shorten_link(post):\n return f\"redd.it/{post.id}\"", "def recommend(user_id: int, titles=False, n_max=10):\n user_info = reader.UserList()\n user_matrix = user_info.user_matrix\n if user_id not in user_matrix.index:\n # new user\n print(\"User not in the list of users\")\n return recommend_for_new_user(titles, n_max)\n elif user_info.get_user_status_from_id(user_id) == 'new_user':\n print(\"New users: not enough information for collaborative recommendation\")\n return recommend_for_new_user(titles, n_max)\n else:\n return user_info.get_user_recomendations(user_id, titles, n_max)", "def status_url(self, username, id):\n return urllib.parse.urljoin(self.instance, f'/p/{urllib.parse.quote(username)}/{id}')", "def process_survey_link(survey_link, user):\r\n return survey_link.format(UNIQUE_ID=unique_id_for_user(user))", "def requestURL(userID): #@NoSelf", "def generate_url_reset(user):\n uid = int_to_base36(user.pk)\n token = default_token_generator.make_token(user)\n url = reverse('reset_password_new_user', args=[uid, token])\n return SITE_URL + url", "def GenerateUrl():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['redirect_uri'] = Constants.AUTH['REDIRECT']\n params['scope'] = Constants.AUTH['SCOPE']\n params['response_type'] = 'code'\n return '%s?%s' % (Constants.OAUTH, FormatUrl(params))", "def to_string(self):\n return \"User: {} Description: {} Ratings: {}\".format(self.id_user, self.description, self.ratings)", "def __str__(self):\n if self.recommend:\n review = 'recommended by {}: {}'.format(self.reviewer, self.comments)\n else:\n review = 'not recommended by {}: {}'.format(self.reviewer, self.comments)\n\n return review", "def add_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n 
self.RS.sadd(key, user_id)", "def get_endpoint_help_url(user, endpoint):\n serializer = get_serializer()\n data = serializer.serialize(user, endpoint)\n return reverse('django-numerics-help', kwargs={'code': urlquote(data)})", "def get_absolute_url(self) -> str:\n return reverse(\"accounts:user-detail\", args=[str(self.user.id)])", "def tweet_url(username, id):\n return 'http://twitter.com/%s/status/%d' % (username, id)", "def redact_uri(uri):\n uri = ACCESS_TOKEN_RE.sub(r\"\\1<redacted>\\3\", uri)\n return CLIENT_SECRET_RE.sub(r\"\\1<redacted>\\3\", uri)", "def genQrelStr(queryId, docId, relGrade):\n return f'{queryId} 0 {docId} {relGrade}'", "def _get_uri_reference(self):\n ref_name, ref_val = next(iter(self._choose_reference().items()))\n if ref_name == 'sha1':\n return 'sha1/%s' % ref_val\n else:\n return 'ref/%s' % ref_val", "def makeFootnoteRefId(self, id):\n if self.getConfig(\"UNIQUE_IDS\"):\n return 'fnref%s%d-%s' % (self.sep, self.unique_prefix, id)\n else:\n return 'fnref%s%s' % (self.sep, id)", "def prepare_resource_uri(self, object):\n return '/api/v1/actor/{0}/'.format(object.id)", "def robo_avatar_url(user_data, size=80):\n hash = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()\n url = \"https://robohash.org/{hash}.png?size={size}x{size}\".format(\n hash=hash, size=size)\n return url", "def UriStrFor(iterated_uri, obj):\n return '%s://%s/%s' % (iterated_uri.scheme, obj.bucket.name, obj.name)", "def print_user(self, user):\n status = \"active\"\n token = user.token\n\n if token in [\"finished\", \"revoked\"]:\n status = token\n\n if token is None:\n token = \"\"\n\n subid = \"%s\\t%s[%s]\" % (user.id, token, status)\n print(subid)\n return subid", "def rate_review_for_user():\n values = flask.request.values\n review_id = values.get('review_id')\n voted_helpful = values.get('voted_helpful')\n review_type = values.get('review_type')\n\n uc_review = None\n filtered_courses = m.UserCourse.objects(id=review_id)\n if len(filtered_courses) > 0:\n uc = filtered_courses[0]\n if review_type == 'course':\n uc_review = uc.course_review\n else:\n uc_review = uc.professor_review\n else:\n filtered_courses = m.MenloCourse.objects(id=review_id)\n if len(filtered_courses) > 0:\n uc = filtered_courses[0]\n uc_review = uc.professor_review\n\n vote_added_response = api_util.jsonify({\n 'success': True\n })\n voted_already_response = api_util.jsonify({\n 'already_voted': True\n })\n\n user = _get_user_require_auth()\n if review_type == 'course':\n if review_id in user.voted_course_review_ids:\n return voted_already_response\n user.voted_course_review_ids.append(review_id)\n elif review_type == 'prof':\n if review_id in user.voted_prof_review_ids:\n return voted_already_response\n user.voted_prof_review_ids.append(review_id)\n user.save()\n\n if uc_review:\n if voted_helpful == 'true':\n uc_review.num_voted_helpful += 1\n else:\n uc_review.num_voted_not_helpful += 1\n uc.save()\n\n return vote_added_response", "def get_url(self, item):\n config = {}\n uuid = self.data.get('uuid', None)\n obj = uuidToObject(uuid)\n if uuid and obj:\n config = copy.copy(self.get_config(obj))\n\n url = u'{0}{1}'.format(self.view_url(obj), item.id.value)\n if config.get('modify_url', True):\n url = u'{0}___{1}-{2}'.format(\n url,\n item.title.value,\n item.location.value,\n )\n return url", "def recommend(user_id, ratings, movie_names, n_neighbors=10, n_recomm=5):\n \n # convert long to wide\n ratings_wide = ratings.pivot(index='user', columns='movie', values='rating')\n\n # all the items a user has 
not rated, that can be recommended\n all_items = ratings_wide.loc[user_id,:]\n unrated_items = all_items.loc[all_items.isnull()]\n \n # convert the index with item ids into Series values\n unrated_items = unrated_items.index.to_series(name='item_ids').reset_index(drop=True)\n print('User {} has {} unrated items.'.format(user_id, len(unrated_items)))\n \n # compute user similarities\n similarities = compute_similarities(user_id, ratings_wide)\n \n # generate predictions for unseen items based on the user similarity data\n predictions = unrated_items.apply(lambda d: predict_rating(d, ratings_wide, similarities, N=n_neighbors))\n \n # sort items by highest predicted rating\n predictions = predictions.sort_values(ascending=False)\n \n # recommend top N items\n recommends = predictions.head(n_recomm)\n \n # reformat the result\n recommends = recommends.to_frame(name='predicted_rating')\n recommends = recommends.rename_axis('movie_id')\n recommends = recommends.reset_index()\n \n recommends['name'] = recommends.movie_id.apply(lambda d: movie_names[d])\n \n return recommends", "def format_id(self, html=False):\n if self.term_type == 'C':\n full_id = 'KEGG:' + self.org_prefix + self.term_id\n else:\n full_id = 'KEGG:' + self.term_type\n\n if html:\n term_id = self.id_anchor_fmt % (self.url(), full_id)\n else:\n term_id = full_id\n return term_id", "def _format_id(ns, id):\n label = '%s:%s' % (ns, id)\n label = label.replace(' ', '_')\n url = get_identifiers_url(ns, id)\n return (label, url)", "def default_sub_generator(user):\n return str(user.id)", "def get_endpoint_url(user, endpoint):\n serializer = get_serializer()\n data = serializer.serialize(user, endpoint)\n return reverse('django-numerics-endpoint', kwargs={'code': urlquote(data)})", "def _uri_realm_creator(self, endpoint=\"json\", realm=None, uri=None, arguments=None):\n if realm is not None:\n uri = endpoint + '/' + realm + '/' + uri\n else:\n uri = endpoint + '/' + uri\n\n if arguments is not None:\n uri += arguments\n\n return uri", "def __BuildGetUrlRev(self, baseUrl, userName = \"\", limit = -1, since = -1, offset = -1):\n\n url = \"/\"\n if (userName == self.userName):\n if (since < 1):\n url += baseUrl\n else:\n url += baseUrl+\"/since\"+\"/\"+str(since)\n elif (userName == \"\"):\n if (since < 1):\n url += baseUrl+\"/all\"\n else:\n url += baseUrl+\"/\"+str(since)+\"/all_since\"\n else:\n if (since < 1):\n url += \"users/\"+userName+\"/\"+baseUrl\n else:\n url += \"users/\"+userName+\"/\"+baseUrl+\"/\"+\"since/\" + str(since)\n\n if (limit > 0 and offset == -1):\n url += \"?limit=\"+str(limit)\n elif (offset > 0 and limit == -1):\n url += \"?offset=\"+str(offset)\n elif (limit > 0 and offset > 0):\n url += \"?limit=\"+str(limit)+\"&offset=\"+str(offset)\n\n return url", "def compute_url_link(row):\n return f'https://twitter.com/-/status/{row[\"id\"]}'", "def user(request, user_id):\n raise NotImplementedError", "def generate_user_id() -> str:\n return 'u' + str((uuid.getnode()))", "def _get(self, user_id):\n user = DB_USER_TABLE.get(doc_id=int(user_id))\n if not user:\n flask_restful.abort(404, message=f\"User '{user_id}' not found!\")\n res = {\n \"id\" : user.doc_id\n }\n res.update(user)\n res['_links'] = self.make_links({\n \"self\" : User.get_self_url(user.doc_id),\n \"contained_in\" : UserList.get_self_url(),\n \"customers\" : UserCustomerList.get_self_url(user.doc_id),\n \"tickets\" : UserTicketList.get_self_url(user.doc_id)\n })\n return res", "def add_random_id(self, user_id, random_id, survey_url):\n if user_id not in 
self.user_id_to_random_ids:\n self.user_id_to_random_ids[user_id] = []\n self.user_id_to_survey_urls[user_id] = []\n self.user_id_to_random_ids[user_id].append(random_id)\n self.user_id_to_survey_urls[user_id].append(survey_url)", "def __insertandretrieve_recommendation(self, userid=None, itemid=None):\n def recommendation2rec(recommendationsip=None):\n recs = []\n for recommendation in recommendationsip:\n recs.append(self.__itemidx2id[recommendation[0]])\n return recs\n userid = str(userid)\n itemid = str(itemid)\n if userid in list(self.__userid2idx.keys()):\n useridx = self.__userid2idx[userid]\n recommendations = self.__recommender.recommend(useridx, self.__useritem, N=self._num_recommendations)\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n if itemid in list(self.__itemid2idx.keys()):\n itemidx = self.__itemid2idx[itemid]\n recommendations = self.__recommender.similar_items(itemidx, N=self._num_recommendations)\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n recommendations = list(self.__itemid2idx.keys())\n random.shuffle(recommendations)\n recommendations = recommendations[:self._num_recommendations]\n return recommendations", "def _format_sas_uri(id_scope: str, registration_id: str) -> str:\n return \"{id_scope}/registrations/{registration_id}\".format(\n id_scope=id_scope, registration_id=registration_id\n )", "def __repr__(self) -> str:\n return (f'dicomweb_client.URI(base_url={self.base_url!r}, '\n f'study_instance_uid={self.study_instance_uid!r}, '\n f'series_instance_uid={self.series_instance_uid!r}, '\n f'sop_instance_uid={self.sop_instance_uid!r}, '\n f'frames={self.frames!r}, suffix={self.suffix!r})')", "def viewurilink(uri) :\n\tname = schema.uri_to_name(uri)\n\tif name :\n\t\turl = '/view/name/' + quote(name)\n\telif uri[:7] == \"http://\" :\n\t\turl = '/view/uri/' + uri[7:]\n\telse :\n\t\turl = '/view/uri?id=' + uri\n\t\n\treturn '<a href=\"%s\">%s</a>' % (url, name or n.shorten(uri))", "def referral_report(referral_id):\n return f\"/app/dashboard/referral-detail/{referral_id}/draft-answer\"", "def build_uri(secret, name, initial_count=None, issuer_name=None,\n algorithm=None, digits=None, period=None):\n # initial_count may be 0 as a valid param\n is_initial_count_present = (initial_count is not None)\n\n # Handling values different from defaults\n is_algorithm_set = (algorithm is not None and algorithm != 'sha1')\n is_digits_set = (digits is not None and digits != 6)\n is_period_set = (period is not None and period != 30)\n\n otp_type = 'hotp' if is_initial_count_present else 'totp'\n base_uri = 'otpauth://{0}/{1}?{2}'\n\n url_args = {'secret': secret}\n\n label = quote(name)\n if issuer_name is not None:\n label = quote(issuer_name) + ':' + label\n url_args['issuer'] = issuer_name\n\n if is_initial_count_present:\n url_args['counter'] = initial_count\n if is_algorithm_set:\n url_args['algorithm'] = algorithm.upper()\n if is_digits_set:\n url_args['digits'] = digits\n if is_period_set:\n url_args['period'] = period\n\n uri = base_uri.format(otp_type, label, urlencode(url_args).replace(\"+\", \"%20\"))\n return uri", "def url(self):\n if self.term_type != 'C':\n url_fmt = self.path_level_url_fmt\n url_info = {'id': self.term_type}\n else:\n url_fmt = self.obj_level_url_fmt\n url_info = {'org_prefix': self.org_prefix, 'id': self.term_id}\n\n return url_fmt % url_info", "def __BuildGetUrl(self, baseUrl, userName = \"\", limit = -1, since = -1, offset = -1):\n\n url = \"/\"\n if (userName == 
self.userName):\n if (since < 1):\n url += baseUrl\n else:\n url += baseUrl+\"/\"+str(since)+\"/since\"\n elif (userName == \"\"):\n if (since < 1):\n url += baseUrl+\"/all\"\n else:\n url += baseUrl+\"/\"+str(since)+\"/all_since\"\n else:\n if (since < 1):\n url += \"users/\"+userName+\"/\"+baseUrl\n else:\n url += \"users/\"+userName+\"/\"+baseUrl+\"/\"+str(since)+\"/since\"\n\n if (limit > 0 and offset == -1):\n url += \"?limit=\"+str(limit)\n elif (offset > 0 and limit == -1):\n url += \"?offset=\"+str(offset)\n elif (limit > 0 and offset > 0):\n url += \"?limit=\"+str(limit)+\"&offset=\"+str(offset)\n\n return url", "def show_user_profile(user_id):\n\n user = User.query.filter_by(user_id=user_id).one()\n rating = Rating.query.filter_by(user_id=user_id).all()\n\n \n return render_template(\"user_detail.html\", user=user, rating=rating)", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def construct_path(id_val):\n id_val = str(id_val)\n path = id_val[:3] + \"/\" + id_val[3:6] + \"/\" + id_val[6:9] + \"/\"\n path += id_val\n return path", "def request_uri(self, identifier):\n path = self.PATH_TEMPLATE % (identifier, identifier)\n return self.api_baseurl + path", "def uri_string(self):\n if isinstance(self.entity, int):\n uri_string = \"{{{0}}}\".format(self.entity)\n elif isinstance(self.entity, NodePointer):\n uri_string = \"{{{0}}}\".format(self.entity.address)\n else:\n try:\n uri_string = self.entity.ref\n except AttributeError:\n uri_string = ustr(self.entity)\n if self.segments:\n if not uri_string.endswith(\"/\"):\n uri_string += \"/\"\n uri_string += \"/\".join(map(percent_encode, self.segments))\n return uri_string", "def make_link(id_: str, is_public: bool):\n return id_[:8] if is_public else id_[8:]", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def user_id_str(self):\n return str(self.status.user['id'])", "async def _idavatar(self, ctx, userid: int = None):\n e = discord.Embed(color=discord.Color.blurple())\n if not userid:\n user = ctx.author\n else:\n try:\n user = await ctx.bot.fetch_user(int(userid))\n if user is None:\n raise Exception(\"User is None.\")\n except Exception as e:\n await ctx.send(f\"Failed to catch user: {e}\")\n e.set_image(url=user.avatar_url)\n e.set_author(name=f\"{user.name}'s avatar\", icon_url=user.avatar_url, url=user.avatar_url)\n e.set_footer(text=f\"{ctx.author.name} wanted to see.\", icon_url=ctx.author.avatar_url)\n await ctx.send(embed=e)", "def _get_path(self, uid: Optional[Union[UUID, str]] = None,\n ignore_dataset: Optional[bool] = False) -> str:\n subpath = format_escaped_url('/{}', uid) if uid else ''\n if ignore_dataset:\n return format_escaped_url(self._dataset_agnostic_path_template + subpath,\n **self.__dict__)\n else:\n return format_escaped_url(self._path_template + subpath,\n **self.__dict__)", "def funding_round_url(uuid, user_key=API_KEY):\n return \"http://api.crunchbase.com/v/2/funding-round/%s?user_key=%s\" % (uuid, user_key)", "def recommend(self,\n user_id: int,\n number_of_recommendation: int,\n selected_algorithm: KNNBaseline or SVD) -> pd.DataFrame:\n # Creating the surprise models for reader and dataset\n # rating_scale indicates the range of given ratings\n reader = Reader(rating_scale=(1, 5))\n data = 
Dataset.load_from_df(self.ratings_df[['userId', 'movieId', 'rating']], reader)\n\n # Building whole trainset to train the algorithm\n train_dataset = data.build_full_trainset()\n # Building a test set from remaining part of tha dataset\n test_dataset = train_dataset.build_anti_testset()\n # Train and test the model\n recommendations = selected_algorithm.fit(train_dataset).test(test_dataset)\n # Store the accuracy of model with Root Mean Sqared Error\n rmse = accuracy.rmse(recommendations, verbose=False)\n print('Root Mean Squared Error is {}'.format(rmse))\n # Convert the recommendations into pd.Dataframe data type\n recommendations = pd.DataFrame(recommendations, columns=['userId', 'movieId', 'trueRating', 'estimatedRating', 'USELESS COLUMN']).drop(columns='USELESS COLUMN')\n # Merge the recommendations with self.movies_df in order to get additional informations of movie title and genres\n # Sort the values in descending ortder in order to show the most similar recommendations on the top\n recommendations = pd.merge(left=recommendations[recommendations['userId'] == user_id].sort_values(by='estimatedRating', ascending=False, ignore_index=True), right=self.movies_df, on='movieId')\n return recommendations.head(number_of_recommendation)", "def expand_user_refs(body, user_pattern):\n def repl(m):\n attributes = {}\n for a in RE_ATTRIBUTE.finditer(m.group(1)):\n a, v = a.group(1), a.group(2)\n attributes[a.lower()] = html.unescape(v)\n # Some <user...> elements have anomalous attributes\n for alt in ['name', 'comm']:\n if alt in attributes:\n attributes['user'] = attributes[alt]\n # Users with an underscore in the name turn into dashes when in a URL\n attributes['label'] = RE_UNDERSCORE.sub(\"-\", attributes['user'])\n if 'site' in attributes:\n # If a site is specified follow that\n attributes['url'] = 'https://{label}.{site}/profile'.format(\n **attributes)\n else:\n # Otherwise assume it's on this site\n attributes['url'] = user_pattern.format(attributes[\"label\"])\n return \"<a class=user href=\\\"{}\\\">{}</a>\".format(html.escape(attributes['url']), html.escape(attributes['user']))\n return RE_USER_REF.sub(repl, body)", "def get_recommendations_for_user(self, user_id):\r\n\r\n sql_command = \"\"\"\r\n SELECT event_id, score\r\n FROM UserRecommendations\r\n WHERE user_id = '{0}'\r\n ORDER BY score\r\n \"\"\".format(user_id)\r\n self.controller.execute(sql_command)\r\n\r\n return self.controller.fetchall()", "def draft_referrals_referral_detail(referral):\n return f\"/app/new-referral/{referral}\"", "def resource_uri(self):\n primary_key_value = getattr(self, self.primary_key(), None)\n return '/{}/{}'.format(self.endpoint(), primary_key_value)", "def id_to_uri(package_id: str, sticker_id: str) -> str:\n return StickerUtils.URI_SCHEME + \"://\" + package_id + \"/\" + sticker_id", "def build_auth_url(additional_scopes=[], client_id=''):\n user_scopes = ['Read & modify playback.'] + additional_scopes\n scopes = []\n for scope in AUTH_SCOPES_MAPPING:\n if scope['name'] in user_scopes:\n scopes += scope['scopes']\n\n auth_url = (\n 'https://accounts.spotify.com/authorize?client_id={}'\n '&response_type=code&redirect_uri={}&scope={}&state={}'\n .format(\n client_id or CLIENT_ID,\n ul.quote_plus(REDIRECT_URI),\n ul.quote_plus(\" \".join(scopes)),\n uuid1(),\n )\n )\n return auth_url", "def _product_reviews_url(self, url):\n temp_url = re.sub('/dp/', '/product-reviews/', url)\n return re.sub('ref=(.+)\\?', 'cm_cr_pr_top_link_1', temp_url)", "def get_substitute_for(userid):", "def 
get_avatar_url_for_user(user_id: UserID) -> str | None:\n avatar_urls_by_user_id = get_avatar_urls_for_users({user_id})\n return avatar_urls_by_user_id.get(user_id)", "def createUniqueRatingId():\n #connector = appEngine.connect()\n ratingID = 'r' + str(ceil(time.time()))\n return ratingID", "def get_history(user):\n if user in resteems and user in honours:\n return \"**\"+str(resteems[user])+\"** Resteems, **\"+str(honours[user])+\"** Honours\"\n elif user in resteems:\n return \"**\"+str(resteems[user])+\"** Resteems, **0** Honours\"\n elif user in honours:\n return \"**0** Resteems, **\"+str(honours[user])+\"** Honours\"\n else:\n return \"**0** Resteems, **0** Honours\"", "def __insertandretrieve_recommendation(self, userid=None, itemid=None):\n def recommendation2rec(recommendationsip=None):\n recs = []\n for recommendation in recommendationsip:\n recs.append(self.__itemidx2id[recommendation[0]])\n return recs\n userid = str(userid)\n itemid = str(itemid)\n if userid in list(self.__userid2idx.keys()):\n useridx = self.__userid2idx[userid]\n userarray = numpy.asarray([useridx, ] * len(self.__itemidx2id.keys()))\n itemarray = numpy.asarray(list(self.__itemidx2id.keys()))\n predicted_ratings = self.__recommender1.predict([userarray, itemarray], batch_size=10, verbose=0)\n item_rating = {}\n for item, pr in zip(itemarray, predicted_ratings):\n item_rating[item] = pr[0]\n recommendations = sorted(item_rating.items(), key=lambda value: value[1], reverse=True)[:self._num_recommendations]\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n if itemid in list(self.__itemid2idx.keys()):\n itemidx = self.__itemid2idx[itemid]\n recommendations = self.__recommender2.similar_items(itemidx, N=self._num_recommendations)\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n recommendations = list(self.__itemid2idx.keys())\n random.shuffle(recommendations)\n recommendations = recommendations[:self._num_recommendations]\n return recommendations", "def recommend(user_id):\n\n df = pd.read_sql(DATABASE_URL, index_col=\"id\", columns=[\"sex\", \"age\", \"haversine_distance\"])\n\n k = 5\n similarity = get_demographic_similarity(df, user_id)\n similarity = similarity.sort()[::-1]\n\n users = similarity[1:1 + k]\n\n # Get the charities then select the most common\n charity_counts = {}\n for user in users:\n charity_counts.ad", "def get_password_reset_url(request, user):\n signer = TimestampSigner(salt=settings.RESET_SALT)\n token = signer.sign('{}'.format(user.uuid))\n return request.build_absolute_uri('/#/password-reset/?token={}'.format(token))", "def _unique_path(user_id, filename, category='images'):\n ext = os.path.splitext(filename)[-1]\n new_filename = '{}{}'.format(uuid.uuid4(), ext)\n return os.path.join(category, str(user_id), new_filename)", "def create_uri(uri):\n return URIRef(uri)", "def set_uri(self, uri):\n # Parse URI\n parsed_uri = urllib.parse.urlparse(uri)\n # Separate out the user ID for HydroShare users\n contributor_pk = os.path.basename(parsed_uri.path.strip('/'))\n # Make sure this is a HydroShare user URI\n is_hs_user_uri = False\n try:\n validate_hydroshare_user_id(contributor_pk)\n is_hs_user_uri = True\n except ValidationError:\n pass\n\n if is_hs_user_uri:\n # Set rel_uri\n self.rel_uri = parsed_uri.path\n pk = None\n try:\n pk = int(contributor_pk)\n except ValueError:\n msg = \"User ID {0} is not an integer. 
User URI was {1}.\"\n raise GenericResourceMeta.ResourceMetaException(msg)\n\n assert (pk is not None)\n self.id = pk\n\n self.uri = uri", "def _format_caller(call_user, phone):\n # The phone number is private or not provided\n if not phone:\n return 'dolt nummer'\n\n if is_valid_phone_number(phone):\n # Set the phone number as a clickable link\n caller = '<tel:%s|%s>' % (phone, phone)\n else:\n caller = phone\n\n if call_user is not None:\n caller = '%s %s (%s)' % (\n call_user['first_name'],\n call_user['last_name'],\n caller\n )\n\n return caller", "def get_absolute_url(self):\n return '/profile/%s' % self.id", "def __str__(self):\r\n return str(self.userid)", "def get_username_and_id(self, obj):\n return \"%s - %s\" % (obj.user.username, obj.user.id)", "def recommend(r ,username, users):\r\n # first find nearest neighbor\r\n nearest = computeNearestNeighbor(r, username, users)[0][1]\r\n recommendations = []\r\n # now find bands neighbor rated that user didn't\r\n neighborRatings = users[nearest]\r\n userRatings = users[username]\r\n for artist in neighborRatings:\r\n if not artist in userRatings:\r\n recommendations.append((artist, neighborRatings[artist]))\r\n # using the fn sorted for variety - sort is more efficient\r\n return sorted(recommendations, key=lambda artistTuple: artistTuple[1], reverse = True)", "def get_absolute_url(self) -> str:\n return \"/users/%s/\" % self.email", "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def get_absolute_url(self):\n return reverse('accountInfo-detail', args=[str(self.uid)])", "def make_req_url(user, repo, endpoint, limit=50, queries=None):\n url = \"%s%s/%s/%s\" % (API_BASE_URL, user, repo, endpoint)\n\n # Set limit is given and is above 50, set limit to 50\n if limit and limit > 50:\n limit = 50\n url += \"?limit=%d\" % limit\n\n # Add additional query parameters\n if queries:\n for key in queries:\n url += \"&%s=%s\" % (key, queries[key])\n return url", "def format_username(self, at_char, user):\r\n return u'<a href=\"http://{domain}/user/{user}\" data-user=\"{user}\">{char}{user}</a>'.format(\r\n **dict(domain=self.domain, user=user, char=at_char, text=user))\r\n\r\n #return u'<a href=\"http://%s/user/%s\" data-user=\"\">%s%s</a>' \\\r\n # % (self.domain, user, at_char, user)\r" ]
[ "0.5987479", "0.5844432", "0.57443655", "0.5638544", "0.5572798", "0.54887694", "0.5328215", "0.531546", "0.5313541", "0.5249727", "0.5235717", "0.5221336", "0.5174196", "0.51600695", "0.51578987", "0.5155158", "0.5142916", "0.5137194", "0.5137102", "0.510324", "0.50865287", "0.5073616", "0.50135016", "0.4983757", "0.49675778", "0.4964927", "0.49620578", "0.49209478", "0.4914215", "0.4907565", "0.489475", "0.48591143", "0.4856857", "0.48453695", "0.4824475", "0.48241714", "0.48182166", "0.48051605", "0.4803196", "0.48017216", "0.47985417", "0.47940034", "0.47774178", "0.47747126", "0.47658515", "0.4759685", "0.47531304", "0.4752945", "0.47525793", "0.47510815", "0.47454634", "0.4745009", "0.47351637", "0.47178537", "0.47166914", "0.47145236", "0.47115678", "0.47067115", "0.47061196", "0.46929106", "0.46894076", "0.46873018", "0.46808276", "0.46779433", "0.46774402", "0.4674197", "0.4674197", "0.46708497", "0.46589833", "0.46550333", "0.4654299", "0.4650437", "0.46471938", "0.46454903", "0.46435466", "0.4637251", "0.46350408", "0.46280366", "0.46273804", "0.4625838", "0.46252534", "0.4624667", "0.46240392", "0.46239173", "0.46237686", "0.4623197", "0.4616299", "0.46118814", "0.46096975", "0.46094036", "0.46090883", "0.45849532", "0.4570258", "0.45612743", "0.45596084", "0.45492435", "0.45479733", "0.4546035", "0.45428896", "0.4542464" ]
0.46228266
86
Create a formatted mail message to send to a worker user. This method passes the user and evaluation_link to be processed by the txt template alerts/subscription_message.txt
def alert_subscription_message(request, user):
    message = loader.get_template(
        'alerts/subscription_message.txt').render(
            {'user': user,
             'evaluation_link': resolve_worker_evaluation_url(request, user)})
    return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def get_second_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Don't lose out.\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your Foojal.com trial subscription.\n\nSign up today for a full year of Foojal.com for only $24.00 a year before we increase the price.\nThat's only $2.00 a month.\n\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nThank you, Kathy and Adam\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def send_welcome_email(cls, user):\n\n cls.customise_auth_messages()\n auth_messages = current.auth.messages\n\n # Look up CMS template for welcome email\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessageInvited\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email", "def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):\n\n creator_name = user_services.get_username(creator_id)\n email_subject = 
('%s has published a new exploration!' % creator_name)\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n '%s has published a new exploration! You can play it here: '\n '<a href=\"https://www.oppia.org/explore/%s\">%s</a><br>'\n '<br>'\n 'Thanks, and happy learning!<br>'\n '<br>'\n 'Best wishes,<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:\n log_new_error('This app cannot send subscription emails to users.')\n return\n\n recipient_list = subscription_services.get_all_subscribers_of_creator(\n creator_id)\n recipients_usernames = user_services.get_usernames(recipient_list)\n recipients_preferences = user_services.get_users_email_preferences(\n recipient_list)\n for index, username in enumerate(recipients_usernames):\n if recipients_preferences[index].can_receive_subscription_email:\n email_body = email_body_template % (\n username, creator_name, exploration_id,\n exploration_title, EMAIL_FOOTER.value)\n _send_email(\n recipient_list[index], feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def send_email_to_trial_user_with_link(\n to, context, from_email=settings.DEFAULT_FROM_EMAIL):\n template = EMAIL_DICT['parse_trial_user_resume']['template']\n subject = EMAIL_DICT['parse_trial_user_resume']['subject']\n return threadify(_send, to, context, subject, from_email, template)", "def render_email(self, user):\n if user.get_profile().subscribed_to_news:\n return EmailMessage(self.subject, self.body, from_email='anon@example.com', to=[user.email])", "def mail_text(self, template_name, subject, send_to=None, user = None, **kwargs):\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n payload = self.render_lang(template_name, **kwargs)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def mail_text(self, template_name, subject, send_to=None, user = None, **kwargs):\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n payload = self.render_lang(template_name, **kwargs)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_welcome_email(user):\n\n register.customise_auth_messages()\n auth_messages = current.auth.messages\n\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n # Look up CMS template for welcome email\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessage\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n 
attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email", "def email_body_appointment_confirmation_for_seller(meeting, buyer_profile, sellr_profile, msg_user_link='https://INSPRITE.co/message/USER'):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Fantastic! You accepted <a href=\"https://127.0.0.1:5000/profile?' + buyer_profile.prof_id + '\" style=\"color:#1488CC\">' + buyer_profile.prof_name + '\\'s proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details:<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"' + msg_user_link + '\" style=\"color:#1488CC\"> ' + buyer_profile.prof_name + ' a message.</a><br><br>We know life can be busy, so we\\'ll send you a reminder 24 hours in advance too.</font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\"><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"><a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a> '\n\tmsg = msg + '| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def render(self, test_mode=False, html_only=False):\n context = aq_inner(self.context)\n if not context.auto_enabled:\n return 'N/A'\n\n now = datetime.now()\n wtool = getToolByName(context, 'portal_workflow')\n soup = getSoup(self.context, config.SUBSCRIBERS_SOUP_ID)\n # strftime accepts any text, not only strftime characters\n subject = now.strftime(context.auto_subject.encode('utf-8'))\n url = context.absolute_url() + '/subscription?uuid=%(uuid)s'\n footer_text = context.footer.output.replace('${url}', '$url')\n footer_text = footer_text.replace('$url', url)\n count = 0\n base_text = ''\n if context.auto_text:\n base_text += now.strftime(context.auto_text.output.encode('utf-8')) + '\\n'\n providers = self._providers()\n gid = 'issue-%s' % now.strftime(\"%Y-%m-%d-%H-%M-%S.%f\")\n idx = 0\n while context.check_id(gid): # python script in skins\n idx += 1\n gid = 'issue-%s-%d' % (now.strftime(\"%Y-%m-%d-%H-%M-%S.%f\"), idx)\n # create anonymous issue text to be stored to portal\n text = safe_unicode(base_text)\n auto_text = u''\n provider_names = []\n\n for p in providers:\n auto_text += safe_unicode(p.get_gazette_text(context, None))\n provider_names.append(repr(p))\n\n if not auto_text:\n # There is no automatically geenrated text. 
Discard sending of newsletter.\n return 'Nothing to send'\n\n text = text + auto_text\n # Create PDF version of the newsletter using wkhtml2pdf as archive of the issue\n pdf_raw = self.make_pdf(text, html_only)\n if not pdf_raw:\n logger.warning('Unable to create PDF of automatically issued gazette.')\n if not test_mode:\n # create Gazette object representing this issue\n gid = context.invokeFactory('gazette.GazetteIssue', gid)\n gazette = context[gid]\n # Fill the newly create Gazette object with generated data\n gazette.title = subject\n gazette.text = RichTextValue(text, mimeType='text/html', outputMimeType='text/html')\n gazette.providers = provider_names\n gazette.sent_at = now\n try:\n # ignore if there is no publish option for now\n wtool.doActionFor(gazette, 'publish')\n except:\n pass\n # Attach PDF to gazette but only if it is not HTML only mode\n if pdf_raw and not html_only:\n fid = gazette.invokeFactory('File', gid + '.pdf')\n file_pdf = gazette[fid]\n file_pdf.setTitle(gazette.title)\n file_pdf.setFile(pdf_raw, mimetype='application/pdf')\n file_pdf.processForm()\n\n for s in soup.query(active=True):\n # returns email and fullname taken from memberdata if s.username is set and member exists\n subscriber_info = s.get_info(context)\n footer = footer_text % subscriber_info\n mail_text = \"\"\n if subscriber_info['salutation']:\n mail_text += \"%s<br /><br />\" % subscriber_info['salutation']\n mail_text += \"%s------------<br />%s\" % (text, footer)\n try:\n if utils.send_mail(context, None, subscriber_info['email'], subscriber_info['fullname'], subject, mail_text):\n count += 1\n except (SMTPException, SMTPRecipientsRefused):\n pass\n context.most_recent_issue = gazette\n else:\n if html_only:\n self.request.response.setHeader('Content-Type', 'text/html;charset=utf-8')\n else:\n self.request.response.setHeader('Content-Type', 'application/pdf')\n return pdf_raw\n\n return str(count)", "def send_admin_notification_callback(sender, **kwargs):\r\n user = kwargs['user']\r\n\r\n studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')\r\n context = {'user_name': user.username, 'user_email': user.email}\r\n\r\n subject = render_to_string('emails/course_creator_admin_subject.txt', context)\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/course_creator_admin_user_pending.txt', context)\r\n\r\n try:\r\n send_mail(\r\n subject,\r\n message,\r\n studio_request_email,\r\n [studio_request_email],\r\n fail_silently=False\r\n )\r\n except SMTPException:\r\n log.warning(\"Failure sending 'pending state' e-mail for %s to %s\", user.email, studio_request_email)", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. 
If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def get_last_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Your trial is over!\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nWe hope you liked your Foojal.com trial and that you will join us for a full year for only $24.00.\n\nTo get a full year subscription to the best online photo food journal, go to your account page at http://app.foojal.com/account.\n\nIf you have any questions, please email us; we would love to talk with you.\n\nThank you, Kathy and Adam\n\n\"\"\"\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % account.nickname\n return message", "def get_first_trial_communication_email(account):\n\n SUBJECT = 'Foojal: First couple of days'\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your first few days of Foojal.com.\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nYour Team:\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def send_welcome_mail(backend, details, response, user, is_new=False, *args, **kwargs):\n\n if is_new:\n context = Context({'user': user, 'ga_campaign_params' : 'utm_source=unishared&utm_content=v1&utm_medium=e-mail&utm_campaign=welcome_mail'})\n\n email_task.apply_async([u'Welcome on UniShared!', context, 'welcome_mail', [user.email]], eta= datetime.utcnow() + timedelta(hours=1))", "def send_mail_to_student(student, param_dict):\r\n\r\n # add some helpers and microconfig subsitutions\r\n if 'course' in param_dict:\r\n param_dict['course_name'] = param_dict['course'].display_name_with_default\r\n\r\n param_dict['site_name'] = microsite.get_value(\r\n 'SITE_NAME',\r\n param_dict['site_name']\r\n )\r\n\r\n subject = None\r\n message = None\r\n\r\n # see if we are running in a microsite and that there is an\r\n # activation email template definition available as configuration, if so, then render that\r\n message_type = param_dict['message']\r\n\r\n email_template_dict = {\r\n 'allowed_enroll': (\r\n 'emails/enroll_email_allowedsubject.txt',\r\n 'emails/enroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_enroll': (\r\n 'emails/enroll_email_enrolledsubject.txt',\r\n 'emails/enroll_email_enrolledmessage.txt'\r\n ),\r\n 'allowed_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_enrolledmessage.txt'\r\n ),\r\n 'add_beta_tester': (\r\n 'emails/add_beta_tester_email_subject.txt',\r\n 'emails/add_beta_tester_email_message.txt'\r\n ),\r\n 'remove_beta_tester': (\r\n 'emails/remove_beta_tester_email_subject.txt',\r\n 'emails/remove_beta_tester_email_message.txt'\r\n ),\r\n }\r\n\r\n subject_template, message_template = email_template_dict.get(message_type, (None, None))\r\n if subject_template is not None and message_template is not None:\r\n subject = render_to_string(subject_template, param_dict)\r\n message = render_to_string(message_template, param_dict)\r\n\r\n if subject and 
message:\r\n # Remove leading and trailing whitespace from body\r\n message = message.strip()\r\n\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message, from_address, [student], fail_silently=False)", "def stock_email_blast(stock_dict, notification_time):\n\n with bigbeta_app.app_context():\n print('sending email')\n user_list = build_users_list()\n msg = Message('Big Mover in the Market!',\n sender=email_sender,\n recipients=['jonmbrenner@gmail.com'])\n # recipients=[user_list])\n msg.body = f\"\"\"\\\n!!!HIGH SHORT INTEREST MOVER ALERT!!!\n${stock_dict['ticker']}\nShort Interest: {stock_dict['short_interest']}\nFloat: {stock_dict['free_float']}\nDays to Cover: {stock_dict['dtc']}\nRelative Volume: {stock_dict['rvol']}\nNews Catalysts: {stock_dict['stories']}\n\nLast Price: {stock_dict['last_price']} collected at {cur_tm_log}\nNotification kicked off at {notification_time} EST\n\nGo get it!\n- BigBeta Team\n\"\"\"\n\n mail.send(msg)\n\n return None", "def send_activation_email(self, user):\n activation_key = self.get_activation_key(user)\n context = self.get_email_context(activation_key)\n context[\"user\"] = user\n subject = render_to_string(\n template_name=self.email_subject_template,\n context=context,\n request=self.request,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = \"\".join(subject.splitlines())\n message = render_to_string(\n template_name=self.email_body_template,\n context=context,\n request=self.request,\n )\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else:\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. 
\" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def generate_web_service_email(details):\n subject = details[\"subject\"]\n body = details[\"message\"]\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = [settings.EMAIL_TARGET_W]\n to_email = details[\"email_to\"]\n\n email = GenericEmailGenerator(subject=subject, to_emails=to_email, bcc=reply_to_email, from_email=from_email,\n reply_to=reply_to_email, body=body, context={'mrkdwn': True})\n\n return email", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nHello {username}:\r\n\r\nPlease activate your Bookie account by clicking on the following url:\r\n\r\n{url}\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n # lookup = config['pylons.app_globals'].mako_lookup\r\n # template = lookup.get_template(template_file)\r\n\r\n # # template vars are a combo of the obj dict and the extra dict\r\n # template_vars = {'data': message_data}\r\n # return template.render(**template_vars)\r", "def email_body_new_proposal_notification_to_seller(meeting, buyer_name, buyer_profile_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr><td align=\"center\" valign=\"top\">\\n\\t</td></tr>\\n</tbody>\\n</table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody><tr>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" 
width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" />'\n\tmsg = msg + '\\n\\t\\t\\t\\t</a>'\n\tmsg = msg + '\\n\\t\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\tGreat! You received a new proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + buyer_profile_id + '\\\" style=\"color:#1488CC\">'+ buyer_name + '</a>.'\n\tmsg = msg + '\\n\\t\\t\\t\\t<br><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDescription: ' + meeting.get_description_html() + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:150px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.accept_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Accept</a> '\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.reject_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #e55e62;font-size: 14px;border-radius: 3px;border: 1px solid #e55e62;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center\" target=\"_blank\">Reject</a> '\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img 
src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n</tr></tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_contact_us_receipt_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\",\n \"contact-us-receipt\", \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_html = render_template(\"main/contact-us-receipt/content.html\")\n msg = Message(\n f'[SetNow Support] Re: {data[\"subject\"]}',\n sender=\"setnow@tuta.io\",\n recipients=[data[\"email\"]],\n )\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def email_body_to_user_receiving_msg(profile, message):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + 'You\\'ve got mail. 
It\\'s from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + str(profile.prof_id) + '\\\" style=\"color:#1488CC\">' + profile.prof_name.encode('utf8', 'ignore') + '</a>.'\n\tmsg = msg + '<br><i>' + message.msg_content + '</i>'\n\tmsg = msg + '</font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Reply</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def email_body_to_user_sending_msg(profile, message):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Way to get the conversation started! 
You messaged <a href=\\\"https://127.0.0.1:5000/profile?hero=' + profile.prof_id + '\\\" style=\"color:#1488CC\">' + profile.prof_name.encode('utf8', 'ignore') + '</a> and should get a response soon.<br><br>'\n\tmsg = msg + 'Until then, stand tight. <br><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nPlease click the link below to activate your account.\r\n\r\n{0}\r\n\r\nWe currently support importing from Google Bookmarks and Delicious exports.\r\nImporting from a Chrome or Firefox export does work, however it reads the\r\nfolder names in as tags. So be aware of that.\r\n\r\nGet the Chrome extension from the Chrome web store:\r\nhttps://chrome.google.com/webstore/detail/knnbmilfpmbmlglpeemajjkelcbaaega\r\n\r\nIf you have any issues feel free to join #bookie on freenode.net or report\r\nthe issue or idea on https://github.com/bookieio/Bookie/issues.\r\n\r\nWe also encourage you to sign up for our mailing list at:\r\nhttps://groups.google.com/forum/#!forum/bookie_bookmarks\r\n\r\nand our Twitter account:\r\nhttp://twitter.com/BookieBmarks\r\n\r\nBookie is open source. 
Check out the source at:\r\nhttps://github.com/bookieio/Bookie\r\n\r\n---\r\nThe Bookie Team\"\"\".format(message_data)", "def email_body_cancellation_from_seller_to_buyer():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> <a href=\"#\" style=\"color:#1488CC\">{Insert user - seller}</a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Check out <a href=\"#\" style=\"color:#1488CC\">{Insert seller}</a>\\'s availability, and send a new proposal. 
(Sometimes, a little reshuffling can really make things happen!)</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way&mdash;write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid 
#1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def _send_email_key(user):\n subject = _(\"Recover your %(site)s account\") % {'site': settings.APP_SHORT_NAME}\n data = {\n 'validation_link': settings.APP_URL + \\\n reverse(\n 'user_account_recover',\n kwargs={'key':user.email_key}\n )\n }\n template = get_template('authenticator/email_validation.txt')\n message = template.render(data)\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def activation_email_template(cls, user_id):\n user = get_user_model().objects.get(id=user_id)\n email = user.e_mail\n activation_key = user.activation_key\n\n htmly = get_template('activation.html')\n \n context_kw = Context({'user': {'email': email, 'activation_key': activation_key}})\n \n email_subject = 'Account confirmation - NoTes'\n from_email = 'testntsystems@gmail.com'\n html_content = htmly.render(context_kw)\n msg = EmailMultiAlternatives(email_subject, html_content, \n from_email, [email])\n msg.content_subtype = \"html\"\n msg.send()", "def generateNotifyMessage(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n today = date.today()\n current_date = today.strftime(\"%B %d, %Y\")\n\n subject = \"Progam operating warning - Not Running\"\n body = \"Since \" + current_date + \" at \" + current_time \n msg = f'Subject: {subject} \\n\\n{body}'\n return msg", "def send_activation_email(self, user):\n\t\tactivation_key = self.get_activation_key(user)\n\t\tcontext = self.get_email_context(activation_key)\n\t\tcontext.update({\n\t\t\t'user': user\n\t\t})\n\t\tsubject = render_to_string(self.email_subject_template,\n\t\t\t\t\t\t\t\t context)\n\t\t# Force subject to a single line to avoid header-injection\n\t\t# issues.\n\t\tsubject = ''.join(subject.splitlines())\n\t\tmessage = render_to_string(self.email_body_template,\n\t\t\t\t\t\t\t\t context)\n\t\tuser.email_user(subject, message, 
settings.DEFAULT_FROM_EMAIL)", "def create_email(username, provider):\n print(f\"Your new email is {username}@{provider}.com\")", "def send_user_email(user, subject, template_name, context=None):\n\n if context is None:\n context = {}\n\n context['user'] = user\n\n to = (user.email,)\n\n send(subject, to, template_name, context)", "def update_helpdesk(self, data):\n self.sr=data\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else: \n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n msg['Subject'] = 'Your Request SR# %s for VM provisioning \\\n reported failure for product %s' % \\\n\t\t\t ( self.sr['requestNumber'], pdict[self.data['groupid']] )\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve \\\n the following problem and notify infra team.\")\n nHtml.append(\"VM creation readiness from vCAC cloud \\\n is reported failure, \\\n Product is <b>%s</b> is stuck.\" \\\n % pdict[self.data['groupid']])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is impacted.<br><br>\" % pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for SR# \\\n related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: \\\n <a href=%s> Effected Build \\\n Console</a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + \\\n msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def email(self, instance):\r\n return mark_safe('<a href=\"mailto:{0}\">{1}</a>'.format(\r\n instance.user.email, instance.user.email,\r\n ))", "def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment):\n html = \"\"\"\n <ul>\n <li>\n <strong>Case {case_name}</strong>: <a href=\"{url}\">{display_name}</a>\n </li>\n <li><strong>Variant type</strong>: {category} ({subcategory})\n <li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>\n <li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>\n <li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>\n <li><strong>Gene 
panels</strong>: {panels}</li>\n <li><strong>GT call</strong></li>\n {gtcalls}\n <li><strong>Amino acid changes</strong></li>\n {tx_changes}\n <li><strong>Comment</strong>: {comment}</li>\n <li><strong>Ordered by</strong>: {name}</li>\n </ul>\n \"\"\".format(\n case_name=case_name,\n url=url,\n display_name=display_name,\n category=category,\n subcategory=subcategory,\n breakpoint_1=breakpoint_1,\n breakpoint_2=breakpoint_2,\n hgnc_symbol=hgnc_symbol,\n panels=panels,\n gtcalls=gtcalls,\n tx_changes=tx_changes,\n name=name,\n comment=comment)\n\n return html", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def send_email_notification(request, question, answer):\n subject = 'New answer for your question'\n to_email = [question.user.email]\n html_message = render_to_string('email/answer.html', {\n 'answer': answer,\n 'question': question,\n 'link': request.build_absolute_uri(reverse('question_detail', kwargs={'pk': question.pk})),\n })\n plain_message = strip_tags(html_message)\n send_mail(subject, plain_message, settings.EMAIL_FROM, to_email, html_message=html_message)", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. \"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def build_mails(user_data):\n mails = []\n print(\"Building texts\")\n for data in user_data:\n missingdata = data['datosquefaltan'].strip()\n if missingdata and missingdata != \"-\":\n missing = MISSING.format(missing_items=missingdata)\n else:\n missing = \"\"\n\n payment_key = (data['tiposocio'], data['formadepago'])\n print(\" \", payment_key, repr(missingdata))\n pago = ALL_PAYMENTS[payment_key]\n\n data.update(missing=missing, pago=pago)\n text = MAIN_TEXT.format(**data)\n\n recipient = \"{} {} <{}>\".format(data['nombre'], data['apellido'], data['email'])\n mails.append((recipient, text))\n\n return mails", "def send_activation_email(self):\n ctx_dict = {\n 'activation_key': self.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\n 'user': self.user,\n 'SITE_URL': settings.SITE_URL,\n }\n subject = render_to_string('accounts/activation_email_subject.txt', ctx_dict)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n \n message = render_to_string('accounts/activation_email_body.html', ctx_dict)\n\n msg = EmailMultiAlternatives(subject, message, None, [self.user.email])\n msg.attach_alternative(message, \"text/html\")\n msg.send()", "def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)s@somewebsite.com\">%(user)s</a>' % {\"user\": user}", "def email_body_cancellation_from_buyer_within_48_hours_to_seller(buyer_name):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" 
width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> Drats. <a href=\"#\" style=\"color:#1488CC\">' + buyer_name + '</a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Message <a href=\"#\" style=\"color:#1488CC\">'+buyer_name+'</a> to see if you can work out a new date and time. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def payment_instructions_email_notification(sender, **kwargs):\n subject_template_name = \\\n 'shop_simplenotifications/payment_instructions_subject.txt'\n body_text_template_name = \\\n 'shop_simplenotifications/payment_instructions_body.txt'\n body_html_template_name = \\\n 'shop_simplenotifications/payment_instructions_body.html'\n \n request = kwargs.get('request')\n order = kwargs.get('order')\n \n emails = []\n if order.user and order.user.email: \n emails.append(order.user.email)\n if request and get_billing_address_from_request(request):\n address = get_billing_address_from_request(request)\n if hasattr(address, 'email'):\n emails.append(address.email)\n emails = list(set(emails)) # removes duplicated entries\n if emails:\n subject = loader.render_to_string(\n subject_template_name,\n RequestContext(request, {'order': order})\n )\n subject = subject.join(subject.splitlines())\n\n text_content = loader.render_to_string(\n body_text_template_name,\n RequestContext(request, {'order': order})\n )\n\n try:\n html_content = loader.render_to_string(\n body_html_template_name,\n RequestContext(request, {'order': order})\n )\n except TemplateDoesNotExist:\n html_content = None\n\n from_email = getattr(settings, 'SN_FROM_EMAIL',\n settings.DEFAULT_FROM_EMAIL)\n\n message = EmailMultiAlternatives(subject, text_content, from_email,\n emails)\n if html_content:\n message.attach_alternative(html_content, \"text/html\")\n message.send()", "def exec(self): \r\n emails = 
self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def send_mail_to_onboard_new_reviewers(user_id, category):\n\n email_subject = 'Invitation to review suggestions'\n\n email_body_template = (\n 'Hi %s,<br><br>'\n 'Thank you for actively contributing high-quality suggestions for '\n 'Oppia\\'s lessons in %s, and for helping to make these lessons better '\n 'for students around the world!<br><br>'\n 'In recognition of your contributions, we would like to invite you to '\n 'become one of Oppia\\'s reviewers. As a reviewer, you will be able to '\n 'review suggestions in %s, and contribute to helping ensure that any '\n 'edits made to lessons preserve the lessons\\' quality and are '\n 'beneficial for students.<br><br>'\n 'If you\\'d like to help out as a reviewer, please visit your '\n '<a href=\"https://www.oppia.org/creator_dashboard/\">dashboard</a>. '\n 'and set your review preferences accordingly. Note that, if you accept,'\n 'you will receive occasional emails inviting you to review incoming '\n 'suggestions by others.<br><br>'\n 'Again, thank you for your contributions to the Oppia community!<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n recipient_user_settings = user_services.get_user_settings(user_id)\n can_user_receive_email = user_services.get_email_preferences(\n user_id).can_receive_email_updates\n\n if can_user_receive_email:\n # Send email only if recipient wants to receive.\n email_body = email_body_template % (\n recipient_user_settings.username, category, category,\n EMAIL_FOOTER.value)\n _send_email(\n user_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_ONBOARD_REVIEWER,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def format_mail(template: str, event: dict, ishtml: bool) -> str:\n header = \"Someone filled the contact form\"\n subtext = \"\"\n # uuid.uuid4().hex\n unsubscribe_key = \"f4bd5dd85908487b904ea189fb81e753\" # Not actually applicable for Admin email ID\n keys = ['firstName', 'lastName', 'email', 'subject', 'message']\n for key in keys:\n if ishtml:\n value = html.escape(event[key]).replace('\\n', '<br/>')\n subtext += \"{}: {}<br>\".format(key, value)\n else:\n subtext += \"{}: {}\\n\".format(key, event[key]).replace('\\n', '\\r\\n')\n template = template.replace('{{header}}', header)\n template = template.replace('{{subtext}}', subtext)\n template = template.replace('{{unsubscribe-key}}', unsubscribe_key)\n return template", "def notification(self, approver_list):\n dns_name = axops_client.get_dns()\n job_id = self.root_id\n url_to_ui = 'https://{}/app/jobs/job-details/{}'.format(dns_name, job_id)\n service = axops_client.get_service(job_id)\n\n html_payload = \"\"\"\n<html>\n<body>\n <table class=\"email-container\" style=\"font-size: 14px;color: #333;font-family: arial;\">\n <tr>\n <td class=\"msg-content\" style=\"padding: 20px 0px;\">\n The {} job is waiting for your approval. 
The job was triggered by {}.\n </td>\n </tr>\n <tr>\n <td class=\"commit-details\" style=\"padding: 20px 0px;\">\n <table cellspacing=\"0\" style=\"border-left: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;border-top: 1px solid #e3e3e3;\">\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Author</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Repo</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Branch</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Description</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Revision</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n </table>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">VIEW JOB</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">VIEW JOB</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">APPROVE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">APPROVE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center 
style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">DECLINE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">DECLINE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"thank-you\" style=\"padding-top: 20px;line-height: 22px;\">\n Thanks,<br>\n Argo Project\n </td>\n </tr>\n </table>\n</body>\n</html>\n\"\"\"\n\n for user in approver_list:\n\n approve_token, decline_token = self.generate_token(user=user, dns_name=dns_name)\n\n approve_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, approve_token)\n decline_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, decline_token)\n\n msg = {\n 'to': [user],\n 'subject': 'The {} job requires your approval to proceed'.format(service['name']),\n 'body': html_payload.format(service['name'], service['user'],\n service['commit']['author'], service['commit']['repo'],\n service['commit']['branch'], service['commit']['description'], service['commit']['revision'],\n url_to_ui, url_to_ui, approve_link, approve_link, decline_link, decline_link),\n 'html': True\n }\n\n if service['user'] != 'system':\n try:\n user_result = axops_client.get_user(service['user'])\n msg['display_name'] = \"{} {}\".format(user_result['first_name'], user_result['last_name'])\n except Exception as exc:\n logger.error(\"Fail to get user %s\", str(exc))\n\n logger.info('Sending approval requests to %s', str(user))\n result = axsys_client.send_notification(msg)\n\n # TODO: Tianhe adding retry mechanism\n if result.status_code != 200:\n logger.error('Cannot send approval request, %s', result.content)\n sys.exit(1)\n logger.info('Successfully sent approval requests to reviewers.')", "def render_message(template_name, extra_context={}):\n mail_text = _render_mail_template(template_name, extra_context)\n rendered_mail = mail_text.replace(u\"\\r\\n\", u\"\\n\").replace(u\"\\r\", u\"\\n\").split(u\"\\n\")\n return rendered_mail[0], \"\\n\".join(rendered_mail[1:])", "def _send_email(self, confirmation_profile, url,\n subject, text_template, html_template,\n send_to, **kwargs):\n current_site = Site.objects.get_current()\n email_kwargs = {'activation_key': confirmation_profile.activation_key,\n 'domain': current_site.domain,\n 'activate_url': url,\n 'login_url': reverse('users.login'),\n 'reg': 'main'}\n email_kwargs.update(kwargs)\n\n # RegistrationProfile doesn't have a locale attribute. 
So if\n # we get one of those, then we have to get the real profile\n # from the user.\n if hasattr(confirmation_profile, 'locale'):\n locale = confirmation_profile.locale\n else:\n locale = confirmation_profile.user.profile.locale\n\n @email_utils.safe_translation\n def _make_mail(locale):\n mail = email_utils.make_mail(\n subject=subject,\n text_template=text_template,\n html_template=html_template,\n context_vars=email_kwargs,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to_email=send_to)\n\n return mail\n\n email_utils.send_messages([_make_mail(locale)])", "def mail_sent():\n\n url = settings.SITE_URL + '\\charts'\n subject = 'Анализ запрошенного ресурса'\n message = 'Графики популярного часа дня и дня недели {}'.format(url)\n mail_sent = send_mail(subject,\n message,\n 'admin@myshop.com',\n ['user@mail.ru,'])\n print(message)\n return mail_sent", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def email_body_appointment_confirmation_for_buyer(meeting, buyer_profile, sellr_profile, msg_url=\"https://127.0.0.1:5000/message?profile=xxxx\"):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Ain\\'t life grand? Meeting\\'s on! <a href=\"https://127.0.0.1:5000/profile?'+ sellr_profile.prof_id + ' style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" accepted your proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details: <br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"'+msg_url+'\" style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" a message.</a><br><br></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def mail_template(self, template_name, send_to=None, user = None, event_title=\"\", **kwargs):\n barcamp = kwargs.get('barcamp')\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n if barcamp is not None:\n subject = barcamp.mail_templates['%s_subject' %template_name]\n tmpl = jinja2.Template(barcamp.mail_templates['%s_text' %template_name])\n kwargs['fullname'] = user.fullname\n payload = tmpl.render(**kwargs)\n payload = payload.replace('((fullname))', user.fullname)\n payload = payload.replace('((event_title))', event_title)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def save(self):\n super(Notification, self).save()\n # get user recipent\n us = self.notified_user\n # check that user has a valid email address\n if us.email.find('@') > 0 and us.email.find('.') > 0:\n # mandatory fields\n subject = strings.EMAIL_NOTIFICATION_SUBJECT\n to = us.email\n from_email = settings.DEFAULT_FROM_EMAIL\n # get text version of the message\n text_content = self.get_email_content_from_type(\n self.notification_type\n )\n # FIXME: HTML version implementation pending\n html_content = self.get_email_content_from_type(\n self.notification_type\n )\n msg = EmailMultiAlternatives(\n subject, \n text_content,\n from_email,\n [to]\n )\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def email_body_cancellation_from_buyer_within_24_hours(sellr_name, cost):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> You cancelled the appointment with <a href=\"#\" style=\"color:#1488CC\">' + sellr_name + '</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t We know 
life can be busy, but we also value accountability within the community and adhere to a <a href=\"#\" style=\"color:#1488CC\">24-hour cancellation policy</a>. You will be charged <a href=\"#\" style=\"color:#1488CC\">$' + str(cost) + '</a> for the service. <br><br>'\n\tmsg = msg + '\\t\\t\\t Questions? <a href=\"#\" style=\"color:#1488CC\">Drop us a line</a> or read our <a href=\"#\" style=\"color:#1488CC\">Terms of Service</a> and <a href=\"#\" style=\"color:#1488CC\">cancellation policies</a> for additional information. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_email_to_reporting_user(subject, template):\n send_connect_email(subject=subject,\n template=template,\n recipient=logged_by,\n logged_against=logged_against,\n site=site,\n comments=comments)", "def create_email_body_for_update_resources(results):\n\n failures = [url for url in results if results[url][\"state\"] == \"Failed\"]\n warnings = [url for url in results if results[url][\"state\"] == \"Warning\"]\n successes = [url for url in results if results[url][\"state\"] == \"Succeeded\"]\n\n body, html_content = \"\", \"\"\n\n # Failed\n if failures:\n body += f\"Failed [{len(failures)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(failures)}]</h2>\\n\\n\"\n\n for url in failures:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: red;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Warnings\n if warnings:\n body += f\"Warnings [{len(warnings)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(warnings)}]</h2>\\n\\n\"\n\n for url in warnings:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: orange;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Succeeded\n if successes:\n body += f\"Succeeded [{len(successes)}]\\n\\n\"\n html_content += f\"<h2>Succeeded [{len(successes)}]</h2>\\n\"\n\n for url in successes:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 
style=\"color: green;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n body_html = f\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Updated BEL Resources for {settings.HOST_NAME}</title>\n </head>\n <body>\n <div id=\"content\">{html_content}</div>\n </body>\n</html>\n \"\"\"\n\n return (body, body_html)", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def send_confirmation_email(data, key, text_template=\"inviteme/confirmation_email.txt\", html_template=\"inviteme/confirmation_email.html\"):\n site = Site.objects.get_current()\n subject = \"[%s] %s\" % (site.name, _(\"confirm invitation request\"))\n confirmation_url = reverse(\"inviteme-confirm-mail\", args=[key])\n message_context = Context({ 'data': data,\n 'confirmation_url': confirmation_url,\n 'support_email': DEFAULT_FROM_EMAIL,\n 'site': site })\n\n # prepare text message\n text_message_template = loader.get_template(text_template)\n text_message = text_message_template.render(message_context)\n # prepare html message\n html_message_template = loader.get_template(html_template)\n html_message = html_message_template.render(message_context)\n\n send_mail(subject, text_message, DEFAULT_FROM_EMAIL, [data['email'],], html=html_message)", "def send_feedback_message_email(recipient_id, feedback_messages):\n email_subject_template = (\n 'You\\'ve received %s new message%s on your explorations')\n\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n 'You\\'ve received %s new message%s on your Oppia explorations:<br>'\n '<ul>%s</ul>'\n 'You can view and reply to your messages from your '\n '<a href=\"https://www.oppia.org/creator_dashboard\">dashboard</a>.'\n '<br>'\n '<br>Thanks, and happy teaching!<br>'\n '<br>'\n 'Best wishes,<br>'\n 'The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:\n log_new_error('This app cannot send feedback message emails to users.')\n return\n\n if not feedback_messages:\n return\n\n recipient_user_settings = user_services.get_user_settings(recipient_id)\n\n messages_html = ''\n count_messages = 0\n for exp_id, reference in feedback_messages.iteritems():\n messages_html += (\n '<li><a href=\"https://www.oppia.org/create/%s#/feedback\">'\n '%s</a>:<br><ul>' % (exp_id, reference['title']))\n for message in reference['messages']:\n messages_html += ('<li>%s<br></li>' % message)\n count_messages += 1\n messages_html += '</ul></li>'\n\n email_subject = email_subject_template % (\n (count_messages, 's') if count_messages > 1 else ('a', ''))\n\n email_body = email_body_template % (\n recipient_user_settings.username, count_messages if count_messages > 1\n else 'a', 's' if count_messages > 1 else '', messages_html,\n EMAIL_FOOTER.value)\n\n _send_email(\n recipient_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = 
new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)", "def training_application_request(request, training):\n\n subject = f'{settings.SITE_NAME} training application notification'\n body = loader.render_to_string(\n 'notification/email/notify_training_request.html', {\n 'training': training,\n 'applicant_name': training.user.get_full_name(),\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [training.user.email], fail_silently=False)", "def mail_template(self, template_name, send_to=None, user = None, **kwargs):\n barcamp = kwargs.get('barcamp')\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n if barcamp is not None:\n subject = barcamp.mail_templates['%s_subject' %template_name]\n tmpl = jinja2.Template(barcamp.mail_templates['%s_text' %template_name])\n kwargs['fullname'] = user.fullname\n payload = tmpl.render(**kwargs)\n payload = payload.replace('((fullname))', user.fullname)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_application_submitted_notification(application):\n candidate_name = application.candidate_name\n if application.authorized_email is not None:\n candidate_email = application.authorized_email\n else:\n candidate_email = application.questionnaire.candidate_email\n\n group_name = application.group.name\n group_email = application.rep_email\n\n cc_emails = [\n '\"%s\" <%s>' % (candidate_name, candidate_email),\n '\"%s\" <%s>' % (\n 'Our Revolution Electoral Coordinator',\n ELECTORAL_COORDINATOR_EMAIL\n ),\n ]\n from_email = 'Our Revolution <%s>' % DEFAULT_FROM_EMAIL\n to_email = [\n # Use double quotes for group name\n '\"%s\" <%s>' % (group_name, group_email),\n ]\n\n subject = \"\"\"\n Your nomination for %s has been submitted! 
Here are the next steps.\n \"\"\" % candidate_name\n\n d = {\n 'or_logo_secondary': OR_LOGO_SECONDARY,\n 'group_name': group_name,\n 'candidate_name': candidate_name\n }\n\n html_template = get_template('email/application_submit_email.html')\n html_content = html_template.render(d)\n text_template = get_template('email/application_submit_email.txt')\n text_content = text_template.render(d)\n\n msg = EmailMultiAlternatives(\n subject,\n text_content,\n from_email,\n to_email,\n cc=cc_emails\n )\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def generate_sms_email(data):\n body = data[\"message\"]\n user = data[\"user\"]\n\n if user.carrier is None or user.carrier == \"\" or user.phone is None:\n return None\n\n to_email = ''.join(e for e in user.phone if e.isalnum()) + \"@\" + user.carrier\n\n email = BasicEmailGenerator(to_emails=to_email, body=body)\n return email", "def email_user(\n email_template_key, \n user, \n template_parameters=None, \n event=None,\n subject_parameters=None, \n file_name='',\n file_path=''\n):\n if user is None:\n raise ValueError('You must specify a user!')\n\n language = user.user_primaryLanguage\n email_template = email_repository.get(None if event is None else event.id, email_template_key, language)\n\n if email_template is None:\n raise ValueError('Could not find email template with key {}'.format(email_template_key))\n \n subject_parameters = subject_parameters or {}\n if event is not None and 'event_name' not in subject_parameters:\n subject_parameters['event_name'] = event.get_name(language) if event.has_specific_translation(language) else event.get_name('en')\n\n subject = email_template.subject.format(**subject_parameters)\n\n template_parameters = template_parameters or {}\n if 'title' not in template_parameters:\n template_parameters['title'] = user.user_title\n if 'firstname' not in template_parameters:\n template_parameters['firstname'] = user.firstname\n if 'lastname' not in template_parameters:\n template_parameters['lastname'] = user.lastname\n if event is not None and 'event_name' not in template_parameters:\n template_parameters['event_name'] = event.get_name(language) if event.has_specific_translation(language) else event.get_name('en')\n\n body_text = email_template.template.format(**template_parameters)\n send_mail(recipient=user.email, subject=subject, body_text=body_text, file_name=file_name, file_path=file_path)", "def generate_selfservice_notice_email(context):\n subject = \"Self Service Form Submission\"\n from_email = settings.DEFAULT_FROM_ADDR\n to_email = [settings.EMAIL_TARGET_W, settings.EMAIL_TARGET_VP]\n\n cont_html = render_to_string('emails/email_selfservice.html', context)\n cont_text = render_to_string('emails/email_selfservice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, to_email)\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def email_body_meeting_rejected_notification_to_seller(meeting, buyer_name, buyer_prof_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr>\\n\\t\\t<td align=\"center\" valign=\"top\">\\n\\t\\t</td>\\n\\t</tr>\\n</tbody>\\n</table>\\n\\n'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody>'\n\tmsg = msg + '\\n\\t<tr>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" 
cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tYou did not accept a proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=\\\"' + buyer_prof_id + ' style=\"color:#1488CC\">' + buyer_name + '</a>.<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tMessage <a href=\"#\" style=\"color:#1488CC\">' + buyer_name + '</a> to see if you can work our a new date and time.'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t</tr>'\n\tmsg = msg + '\\n</tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_verification_email(request, user):\r\n\tuid = urlsafe_base64_encode(force_bytes(user.pk)) #.decode() ## Why did this work in another project?\r\n\ttoken = email_verification_token.make_token(user)\r\n\tactivation_link = 
request.build_absolute_uri(\r\n\t\t#f'{reverse(\"verify_email\")}?uid={uid}&token={token}'\r\n\t\tf'{reverse(\"accounts:verify_email\", kwargs={\"uid\": uid, \"token\": token})}'\r\n\t)\r\n\t\r\n\tmail_subject = account_strings.EMAIL_VERIFICATION_MAIL_SUBJECT\r\n\tmail_body = account_strings.get_email_verification_mail_body(activation_link)\r\n\tfrom_email = account_strings.NOTIFICATION_EMAIL\r\n\tsend_mail(\r\n\t\tmail_subject,\r\n\t\tmail_body,\r\n\t\tfrom_email,\r\n\t\t[user.email] ## to_email\r\n\t)", "def send_user_notification_callback(sender, **kwargs):\r\n user = kwargs['user']\r\n updated_state = kwargs['state']\r\n\r\n studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')\r\n context = {'studio_request_email': studio_request_email}\r\n\r\n subject = render_to_string('emails/course_creator_subject.txt', context)\r\n subject = ''.join(subject.splitlines())\r\n if updated_state == CourseCreator.GRANTED:\r\n message_template = 'emails/course_creator_granted.txt'\r\n elif updated_state == CourseCreator.DENIED:\r\n message_template = 'emails/course_creator_denied.txt'\r\n else:\r\n # changed to unrequested or pending\r\n message_template = 'emails/course_creator_revoked.txt'\r\n message = render_to_string(message_template, context)\r\n\r\n try:\r\n user.email_user(subject, message, studio_request_email)\r\n except:\r\n log.warning(\"Unable to send course creator status e-mail to %s\", user.email)", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def test_email_content():\n\n time_of_day = alerts.current_time()\n hostname = alerts.host_name()\n\n subject = \"Subject: Raspi-Sump Email Test\"\n message = \"Raspi-Sump Test Email\"\n\n return \"\\r\\n\".join(\n (\n f\"From: {configs['email_from']}\",\n f\"To: {configs['email_to']}\",\n f\"{subject}\",\n \"\",\n f\"{hostname} - {time_of_day} - {message}.\",\n )\n )", "def send_email(self, recipients, html_data, assignee=None):\n\n msg = MIMEMultipart('alternative')\n# msg['Subject'] = \"Jira Alert - Stagnant Jiras %s\" % self.options.fl_project\n msg['Subject'] = \"Jira Alert - Stagnant Jiras\"\n msg['From'] = 'jira.alert@lsi.com'\n if assignee:\n msg['To'] = assignee\n msg['Cc'] = ', '.join(recipients) # Assignee emails\n else:\n msg['To'] = ', '.join(recipients) # Main email\n \n html1 = \"<!DOCTYPE html><html><head><meta charset=\\\"utf-8\\\"/><title>HTML Reference</title></head><body>\"\n \n html2 = \"</body></html>\"\n \n final_message = \"%s%s%s\" % (html1, html_data, html2)\n html_message = MIMEText(final_message, 'html', _charset='utf-8')\n msg.attach(html_message)\n \n # Send the message via our own SMTP 
server.\n s = smtplib.SMTP('localhost')\n s.set_debuglevel(1)\n# s.sendmail('richard.leblanc@lsi.com', recipients, msg.as_string())\n s.sendmail('jira.alert@lsi.com', recipients, msg.as_string())\n s.quit()", "def generate_email(start_ref, end_ref, release_date=None):\r\n if release_date is None:\r\n release_date = default_release_date()\r\n prbe = prs_by_email(start_ref, end_ref)\r\n\r\n email = \"\"\"\r\n To: {emails}\r\n\r\n You merged at least one pull request for edx-platform that is going out\r\n in this upcoming release, and you are responsible for verifying those\r\n changes on the staging servers before the code is released. Please go\r\n to the release page to do so:\r\n\r\n https://edx-wiki.atlassian.net/wiki/display/ENG/Release+Page%3A+{date}\r\n\r\n The staging servers are:\r\n\r\n https://www.stage.edx.org\r\n https://stage-edge.edx.org\r\n\r\n Note that you are responsible for verifying any pull requests that you\r\n merged, whether you wrote the code or not. (If you didn't write the code,\r\n you can and should try to get the person who wrote the code to help\r\n verify the changes -- but even if you can't, you're still responsible!)\r\n If you find any bugs, please notify me and record the bugs on the\r\n release page. Thanks!\r\n \"\"\".format(\r\n emails=\", \".join(prbe.keys()),\r\n date=release_date.isoformat(),\r\n )\r\n return textwrap.dedent(email).strip()", "def email_members_old(request, course_prefix, course_suffix):\n error_msg=\"\"\n success_msg=\"\"\n form = EmailForm()\n if request.method == \"POST\":\n form = EmailForm(data=request.POST)\n if form.is_valid():\n sender = request.common_page_data['course'].title + ' Staff <class2go-noreply@cs.stanford.edu>'\n \n recipient_qset = User.objects.none() #get recipients in a QuerySet\n \n if form.cleaned_data['to'] == \"all\" :\n recipient_qset = request.common_page_data['course'].get_all_members()\n elif form.cleaned_data['to'] == \"students\" :\n recipient_qset = request.common_page_data['course'].get_all_students()\n elif form.cleaned_data['to'] == \"staff\" :\n recipient_qset = request.common_page_data['course'].get_all_course_admins()\n elif form.cleaned_data['to'] == \"myself\":\n recipient_qset = User.objects.filter(id=request.user.id)\n #pdb.set_trace()\n courses.email_members.tasks.email_with_celery.delay(\n form.cleaned_data['subject'],\n form.cleaned_data['message'],\n sender,\n recipient_qset.values_list('email',flat=True),\n course_title=request.common_page_data['course'].title,\n course_url=request.build_absolute_uri(reverse('courses.views.main', args=[course_prefix, course_suffix])))\n success_msg = \"Your email was successfully queued for sending\"\n #form = EmailForm()\n \n else:\n error_msg = \"Please fix the errors below:\"\n \n context = RequestContext(request)\n return render_to_response('email/email.html',\n {'form': form,\n 'error_msg': error_msg,\n 'success_msg': success_msg,\n 'course': request.common_page_data['course'],\n 'common_page_data': request.common_page_data},\n context_instance=context)", "def send_mail_to_notify_users_to_review(user_id, category):\n\n email_subject = 'Notification to review suggestions'\n\n email_body_template = (\n 'Hi %s,<br><br>'\n 'Just a heads-up that there are new suggestions to '\n 'review in %s, which you are registered as a reviewer for.'\n '<br><br>Please take a look at and accept/reject these suggestions at'\n ' your earliest convenience. 
You can visit your '\n '<a href=\"https://www.oppia.org/creator_dashboard/\">dashboard</a> '\n 'to view the list of suggestions that need a review.<br><br>'\n 'Thank you for helping improve Oppia\\'s lessons!'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n recipient_user_settings = user_services.get_user_settings(user_id)\n can_user_receive_email = user_services.get_email_preferences(\n user_id).can_receive_email_updates\n\n if can_user_receive_email:\n # Send email only if recipient wants to receive.\n email_body = email_body_template % (\n recipient_user_settings.username, category, EMAIL_FOOTER.value)\n _send_email(\n user_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_REVIEW_SUGGESTIONS,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def send_releng(subject, body):\n send_mail(config.get('bodhi_email'), config.get('release_team_address'),\n subject, body)", "def send_new_email(user):\n token = user.get_token()\n message = Message(\n 'Verify Your New Email',\n sender='storcwebsite@gmail.com',\n recipients=[user.temp_email])\n message.body = f\"The email address associated with your Storc \" \\\n f\"account has changed.\\n\\nTo verify your new email address, \" \\\n f\"please click the link below:\\n\\n\" \\\n f\"{url_for('users.new_email', token=token, _external=True)}\"\n mail.send(message)", "def send_email_upon_registration(request, new_siteuser, via_social=False):\n\n screen_name = new_siteuser.screen_name\n email = new_siteuser.user.email\n subject = \"ChoralCentral - Welcome {}.\".format(screen_name)\n from_email = settings.EMAIL_HOST_USER\n\n if via_social:\n context = {'screen_name' : screen_name}\n text_email = render_to_string(\"siteuser/welcome_email_social.txt\", context)\n html_email = render_to_string(\"siteuser/welcome_email_social.html\", context)\n else:\n activation_link = request.build_absolute_uri(new_siteuser.get_user_creation_url())\n context = {'screen_name' : screen_name, 'activation_link' : activation_link}\n text_email = render_to_string(\"siteuser/welcome_email.txt\", context)\n html_email = render_to_string(\"siteuser/welcome_email.html\", context)\n\n for each in [email, \"choralcentral@gmail.com\"]:\n msg = EmailMultiAlternatives(subject, text_email, from_email, [each])\n msg.attach_alternative(html_email, \"text/html\")\n msg.send()", "def send_confirmation_email(self, user):\n verification_token = self.gen_verification_token(user)\n subject = 'Welcome @{}! Verify your account to start using Comparte Ride'.format(user.username)\n from_email = 'Comparte Ride <noreply@comparteride.com>'\n content = render_to_string(\n 'emails/users/account_verification.html',\n {\n 'token': verification_token,\n 'user': user\n })\n msg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n msg.attach(content, 'text/html')\n msg.send()\n print(\"Sending email\")", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour bookmark import is complete! We've begun processing your bookmarks to\r\nload their page contents and fulltext index them. This process might take a\r\nwhile if you have a large number of bookmarks. 
Check out your imported\r\nbookmarks at https://bmark.us/{username}/recent.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)", "def email_body_cancellation_from_buyer_within_24_hours_to_seller(buyer_name, cost):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> <a href=\"#\" style=\"color:#1488CC\"> ' + buyer_name + ' </a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Sometimes things come up in life, but your time and talent are still valuable. 
You\\'ll receive '+ str(cost) +' from ' + buyer_name + ' for the cancelled booking.</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "async def send_status_message(user: discord.User, subscription_json: dict):\n qr_amount = str(int(Decimal(os.getenv('AMOUNT')) * Decimal(1000000000000000000000000000000)))\n uri_string = f\"nano:{subscription_json['payment_address']}?amount={qr_amount}\"\n qr = pyqrcode.create(uri_string)\n qr.png(f'qr/{user}.png', scale=4, module_color=\"#23272A\")\n\n await manage_roles(user.id, subscription_json)\n embed = discord.Embed(title=\"Subscription Information\", color=0x4169dd)\n embed.add_field(name=\"Subscription Status: \", value=\"**Active**\" if subscription_json['active'] else '**Inactive**')\n embed.add_field(name=\"Expiration Date\", value=subscription_json['expiration_date'][:10], inline=False)\n embed.add_field(name=\"Subscription Cost\", value=(os.getenv('AMOUNT') + ' NANO'), inline=False)\n await user.send(\n embed=embed\n )\n if not subscription_json['active']:\n await user.send(\n file=discord.File(f'qr/{user}.png')\n )\n await user.send(\n f\"Send {os.getenv('AMOUNT')} NANO to:\"\n )\n await user.send(\n f\"{subscription_json['payment_address']}\"\n )", "def make_email_message(itrf_begin, epoch_begin, itrf_final, epoch_final, velocity, date):\n\n message = \"Estimado Usuario,\\n\\nEn adjunto encontrará los resultados de la transformacion ITRF de acuerdo a la siguiente configuración:\\n\\nITRF inicial: \"+str(itrf_begin)+\"\\nEpoca inicial: \"+str(epoch_begin)+\"\\nITRF final: \"+str(itrf_final)+\"\\nEpoca final: \"+str(epoch_final)+\"\\nModelo de velocidad: \"+velocity+\"\\nFecha de la solicitud de la transformación: \"+date+\"\\n\\n\\nSaludos Cordiales,\\n\\nEquipo de Geodesia del IGVSB.\"\n return message", "def send_realtime_email(self,body_):\n import smtplib, ssl\n\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = self.fromaddr # Enter your address\n receiver_email = self.toaddr # Enter receiver address\n password = self.pswd\n message = f\"\"\"\\\nSubject: [Test] Twitter real time (half) hourly trending alert\n\n{body_}\"\"\"\n\n context = ssl.create_default_context()\n # send to multiple emails\n for receiver in receiver_email:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n 
server.sendmail(sender_email, receiver, message)\n \n print(f'Email successfully sent to {receiver}')", "def generate_email(mail, env):\n race, results, standings = get_last_results_and_standings()\n next_race = get_next_race()\n\n subject = f\"Race digest - F1 2021 | Round {race.round} | {race.name}\"\n body = (f\"Results:\\n{results}\\n\\nCurrent standings:\\n\"\n f\"{standings}\\n\\nNext race: {next_race}\")\n\n login_info = env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']\n\n subs = update_db_and_get_subs(mail, (env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']))\n\n for sub in subs:\n send_email(subject, body, sub, login_info)", "def build_message():\n outgoing_mail = Mail()\n outgoing_mail.from_email = Email(email_from_address, email_from_name)\n outgoing_mail.subject = subject\n personalization = Personalization()\n for recipient in email_to_addresses:\n personalization.add_to(Email(recipient))\n outgoing_mail.add_personalization(personalization)\n outgoing_mail.add_content(Content(\"text/plain\", str.join('\\n', _log)))\n outgoing_mail.add_content(Content(\"text/html\", \"<html><body> {} </body></html>\".format(str.join(' <br /> ', _log))))\n return outgoing_mail.get()", "def email_body_meeting_rejected_notification_to_buyer(meeting, sellr_name):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr>\\n\\t\\t<td align=\"center\" valign=\"top\"></td>\\n\\t</tr>\\n</tbody>\\n</table>\\n\\n'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody>'\n\tmsg = msg + '\\n\\t<tr>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tbody>'\n\n\tmsg = msg + '\\n\\t\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\t' + sellr_name + ' didn\\'t accept your proposal this time around.<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tWhy, you ask? There could be many reasons, but trust us, don\\'t take it personally. <br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tNeed to edit, manage or update the appointment? 
Go for it, or follow up with ' + sellr_name + '.'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def alert_new_service_notification(hirer, worker, service):\n\n domain = Site.objects.get_current().domain\n url = \"http://\" + domain + \"/worker/\"\n\n message = loader.get_template(\n 'alerts/new_service_notification.txt').render(\n {'worker': worker, 'hirer': hirer, 'service': service, 'url':url})\n\n return message", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['adrian.borowski.tattoo@gmail.com']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )" ]
[ "0.6634772", "0.63277805", "0.6235654", "0.6227511", "0.61810446", "0.61754483", "0.6124126", "0.6124126", "0.6104047", "0.60752434", "0.6052107", "0.59274155", "0.5924757", "0.590727", "0.58884096", "0.5875973", "0.5858161", "0.58423764", "0.58318377", "0.58315945", "0.58279955", "0.5795231", "0.57707673", "0.57334375", "0.5730184", "0.57179993", "0.57176435", "0.5716276", "0.5711955", "0.57045484", "0.57016027", "0.57001954", "0.5667853", "0.56559974", "0.5652347", "0.5647647", "0.5641655", "0.5641396", "0.56351817", "0.56146413", "0.56125176", "0.560709", "0.5606812", "0.5598629", "0.5575947", "0.5570716", "0.55698615", "0.5569801", "0.55332744", "0.5527812", "0.55207396", "0.55182964", "0.5515628", "0.5511309", "0.5503591", "0.54973906", "0.54964584", "0.5494499", "0.54933023", "0.54923415", "0.5487479", "0.5477847", "0.5473391", "0.547322", "0.54712486", "0.5469506", "0.54692644", "0.5468437", "0.54665804", "0.54636526", "0.5460613", "0.54594964", "0.54545236", "0.5444473", "0.54439396", "0.5436172", "0.54323214", "0.54318106", "0.5426556", "0.5423188", "0.5410673", "0.53999907", "0.53987384", "0.5391675", "0.53892463", "0.53842825", "0.53827614", "0.5382633", "0.5377757", "0.5372551", "0.5371769", "0.5370227", "0.5368694", "0.53662866", "0.5358331", "0.53513634", "0.53497165", "0.5345298", "0.53393984", "0.53379595" ]
0.777387
0
Create a formatted email message to send to a worker user using the template alerts/new_service_notification.txt
def alert_new_service_notification(hirer, worker, service):

    domain = Site.objects.get_current().domain
    url = "http://" + domain + "/worker/"

    message = loader.get_template(
        'alerts/new_service_notification.txt').render(
        {'worker': worker, 'hirer': hirer, 'service': service, 'url':url})

    return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def alert_service_notification(user, service):\n\n message = loader.get_template(\n 'alerts/service_notification.txt').render(\n {'user': user, 'service': service})\n\n return message", "def generateNotifyMessage(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n today = date.today()\n current_date = today.strftime(\"%B %d, %Y\")\n\n subject = \"Progam operating warning - Not Running\"\n body = \"Since \" + current_date + \" at \" + current_time \n msg = f'Subject: {subject} \\n\\n{body}'\n return msg", "def generate_selfservice_notice_email(context):\n subject = \"Self Service Form Submission\"\n from_email = settings.DEFAULT_FROM_ADDR\n to_email = [settings.EMAIL_TARGET_W, settings.EMAIL_TARGET_VP]\n\n cont_html = render_to_string('emails/email_selfservice.html', context)\n cont_text = render_to_string('emails/email_selfservice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, to_email)\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def create_email(username, provider):\n print(f\"Your new email is {username}@{provider}.com\")", "def notify(template_name, context):\n to_address = context['to_address']\n template_name = 'emails/' + template_name\n subject_template = get_template(\n template_name + '_subject.html')\n body_template = get_template(template_name + '_body.html')\n context = Context(context)\n # Strip, otherwise we get header errors.\n subject = subject_template.render(context).strip()\n body = body_template.render(context)\n try:\n sent = send_mail(subject, body, FROM_ADDRESS, [to_address])\n except Exception:\n pass\n\n status = 's' if sent else 'e'\n Notification.objects.create(\n from_address=FROM_ADDRESS,\n to_address=to_address,\n subject=subject,\n body=body,\n status=status,\n )", "def format_mail(template: str, event: dict, ishtml: bool) -> str:\n header = \"Someone filled the contact form\"\n subtext = \"\"\n # uuid.uuid4().hex\n unsubscribe_key = \"f4bd5dd85908487b904ea189fb81e753\" # Not actually applicable for Admin email ID\n keys = ['firstName', 'lastName', 'email', 'subject', 'message']\n for key in keys:\n if ishtml:\n value = html.escape(event[key]).replace('\\n', '<br/>')\n subtext += \"{}: {}<br>\".format(key, value)\n else:\n subtext += \"{}: {}\\n\".format(key, event[key]).replace('\\n', '\\r\\n')\n template = template.replace('{{header}}', header)\n template = template.replace('{{subtext}}', subtext)\n template = template.replace('{{unsubscribe-key}}', unsubscribe_key)\n return template", "def alert_subscription_message(request, user):\n message = loader.get_template(\n 'alerts/subscription_message.txt').render(\n {'user': user, 'evaluation_link': resolve_worker_evaluation_url(request, user)})\n\n return message", "def notify(nt_id, 
application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def send_welcome_email(user):\n\n register.customise_auth_messages()\n auth_messages = current.auth.messages\n\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n # Look up CMS template for welcome email\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessage\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email", "def test_email_content():\n\n time_of_day = alerts.current_time()\n hostname = alerts.host_name()\n\n subject = \"Subject: Raspi-Sump Email Test\"\n message = \"Raspi-Sump Test Email\"\n\n return \"\\r\\n\".join(\n (\n f\"From: {configs['email_from']}\",\n f\"To: {configs['email_to']}\",\n f\"{subject}\",\n \"\",\n f\"{hostname} - {time_of_day} - {message}.\",\n )\n )", "def generate_web_service_email(details):\n subject = 
details[\"subject\"]\n body = details[\"message\"]\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = [settings.EMAIL_TARGET_W]\n to_email = details[\"email_to\"]\n\n email = GenericEmailGenerator(subject=subject, to_emails=to_email, bcc=reply_to_email, from_email=from_email,\n reply_to=reply_to_email, body=body, context={'mrkdwn': True})\n\n return email", "def create_messages(application, action, remedy):\n\n messages = [] \n messages.append(\"\"\"Your Resources: </br><pre style=\"margin-left: 40px\">\"\"\" + application + \"</br></pre>\" + action + \"\"\" in AWS. <strong style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\">\"\"\" + remedy +\"\"\"</strong>\n </td>\n </tr><tr style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\"><td class=\"content-block\" style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; vertical-align: top; margin: 0; padding: 0 0 20px;\" valign=\"top\">\n This message was sent to inform you of changes happening to your resources.\n <ul>\n <li>New instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.</li>\n <li>Instances without the necessary tags are notified through email and Slack.</li>\n </ul>\n If you have any further questions, please reply to this email.\"\"\")\n \n messages.append(\"Your Resources:\\n\\n\" + application + \"\\n\\n\" + action + \" in AWS. \" + remedy + \"\\n\" + \n (\"\\nThis message was sent to inform you of changes happening to your resources.\\n\"\n \"\\nNew instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.\"\n \"Instances without Owner Mail and Owner Team tags are notified through email and slack.\\n\"\n \"\\nIf you have any further questions, please reply to this email.\")) \n\n return messages", "def test_notification_creation_email(self):\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 1)\n msg = message_from_string(mailhost.messages[0])\n\n self.assertEqual(msg['To'], BOARD_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <noreply@eestec.net>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BEVENTS=5D=5BCreated=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been created', msg.get_payload())\n self.assertIn('T=C3=A9st event', msg.get_payload())", "def send_welcome_email(cls, user):\n\n cls.customise_auth_messages()\n auth_messages = current.auth.messages\n\n # Look up CMS template for welcome email\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessageInvited\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & 
(dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def get_first_trial_communication_email(account):\n\n SUBJECT = 'Foojal: First couple of days'\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your first few days of Foojal.com.\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nYour Team:\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def notifications_n_email_after_event_creation(sender, instance, **kwargs):\n alarm = instance.alarm # Alarm which generated the event\n\n subscriptions = Subscription.objects.filter(alarm=alarm) # Getting the subscriptions associated with alarms\n sub_serializer = SubscriptionSerializer(subscriptions, many=True)\n send = [] # list of emails which the mail was send\n notificated = [] # list with users notificated\n\n # If no device, no variable and no content_type, there is nothing to send yet. 
Cancel notification and email\n if instance.device is None and instance.variables is None and len(instance.content_type.all()) == 0 :\n return\n for sub in sub_serializer.data: # Itering for subscription\n if sub['user'] is not None: # if user field isn't NULL AND not Group\n user = User.objects.get(id=sub['user'])\n if sub['active'] and user not in notificated: # if subscription is active\n Notification.objects.create(user=user, event=instance) # creating notification\n notificated.append(user) # adding user to the notified list\n if sub['email']: # if email option is checked\n email = user.email\n if email not in send: # for dont repeat email\n # Get a dict with relevant information about the event\n context = {'event': instance,\n 'alarm': instance.alarm,\n 'user': user,\n 'device': instance.device,\n 'var': instance.variables,\n 'content_type': instance.content_type.all()}\n plain_text = get_template('mail.txt') # Plain text template\n text_content = plain_text.render(context)\n subject = 'Event Alert: ' + instance.__str__()\n from_email = 'noreply@localhost.com'\n to = email\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n try:\n if sub['staff_template'] is not None:\n htmly = get_template(sub['staff_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['staff_template_text'] != \"\":\n htmly = Template(sub['staff_template_text'])\n html_content = htmly.render(Context(context))\n elif sub['user_template'] is not None:\n htmly = get_template(sub['user_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['user_template_text'] != \"\":\n htmly = Template(sub['user_template_text'])\n html_content = htmly.render(Context(context))\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n except:\n msg.send()\n print('Mail send to %s' % email)\n\n if sub['group'] is not None: # if is group and not user\n users_mail_list = [] # list with staff users instances\n if sub['active']: # if subscription is active\n group = Group.objects.get(pk=sub['group']) # Getting the group by id\n users = User.objects.filter(groups__name=group) # getting the users for group\n context = {'event': instance,\n 'alarm': instance.alarm,\n 'user': group,\n 'device': instance.device,\n 'var': instance.variables}\n for user in users: # Iterating users\n if user not in notificated:\n Notification.objects.create(user=user, event=instance) # creating notification\n notificated.append(user) # adding user to notificated list\n if sub['email']:\n mail = user.email # Adding the email for users in the user list\n if mail not in send: # for don't repeat email\n users_mail_list.append(mail)\n send.append(mail)\n # After getting all the emails and classifying it for staff and not staff members\n plain_text = get_template('mail.txt') # Plain text template\n text_content = plain_text.render(context)\n subject = 'Event Alert: ' + instance.__str__()\n from_email = 'noreply@localhost.com'\n msg = EmailMultiAlternatives(subject, text_content, from_email, users_mail_list)\n try:\n if sub['staff_template'] is not None:\n htmly = get_template(sub['staff_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['staff_template_text'] != \"\":\n htmly = Template(sub['staff_template_text'])\n html_content = htmly.render(Context(context))\n elif 
sub['user_template'] is not None:\n htmly = get_template(sub['user_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['user_template_text'] != \"\":\n htmly = Template(sub['user_template_text'])\n html_content = htmly.render(Context(context))\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n except:\n msg.send()\n print('Mail send to %s' % str(users_mail_list))", "def get_second_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Don't lose out.\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your Foojal.com trial subscription.\n\nSign up today for a full year of Foojal.com for only $24.00 a year before we increase the price.\nThat's only $2.00 a month.\n\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nThank you, Kathy and Adam\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def make_email_message(itrf_begin, epoch_begin, itrf_final, epoch_final, velocity, date):\n\n message = \"Estimado Usuario,\\n\\nEn adjunto encontrará los resultados de la transformacion ITRF de acuerdo a la siguiente configuración:\\n\\nITRF inicial: \"+str(itrf_begin)+\"\\nEpoca inicial: \"+str(epoch_begin)+\"\\nITRF final: \"+str(itrf_final)+\"\\nEpoca final: \"+str(epoch_final)+\"\\nModelo de velocidad: \"+velocity+\"\\nFecha de la solicitud de la transformación: \"+date+\"\\n\\n\\nSaludos Cordiales,\\n\\nEquipo de Geodesia del IGVSB.\"\n return message", "def send_welcome_mail(backend, details, response, user, is_new=False, *args, **kwargs):\n\n if is_new:\n context = Context({'user': user, 'ga_campaign_params' : 'utm_source=unishared&utm_content=v1&utm_medium=e-mail&utm_campaign=welcome_mail'})\n\n email_task.apply_async([u'Welcome on UniShared!', context, 'welcome_mail', [user.email]], eta= datetime.utcnow() + timedelta(hours=1))", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else:\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi 
Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def get_last_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Your trial is over!\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nWe hope you liked your Foojal.com trial and that you will join us for a full year for only $24.00.\n\nTo get a full year subscription to the best online photo food journal, go to your account page at http://app.foojal.com/account.\n\nIf you have any questions, please email us; we would love to talk with you.\n\nThank you, Kathy and Adam\n\n\"\"\"\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % account.nickname\n return message", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nThe import for user {username} has failed to import. 
The path to the import\r\nis:\r\n\r\n{file_path}\r\n\r\nError:\r\n\r\n{exc}\r\n\r\n\"\"\".format(**message_data)\r\n return msg", "def notify_email(subj, message, json, logger=None):\n\n fname = os.path.join(os.path.dirname(__file__), \"config_emails.conf\")\n elist = read_file_aslist(fname, logger)\n\n if logger is not None:\n logger.debug(\"\"\"\nSubject: {}\nMessage: {}\nJson: {}\nEmails: {}\n \"\"\".format(subj, message, json, elist))", "def get_personalized_notification_email_text(personal_id):\n return notification_email_text % (personal_id, personal_id)", "def build_hello_email():\n from_email = Email(\"test@example.com\")\n subject = \"Hello World from the SendGrid Python Library\"\n to_email = Email(\"test@example.com\")\n content = Content(\"text/plain\", \"some text here\")\n mail = Mail(from_email, subject, to_email, content)\n mail.personalizations[0].add_to(Email(\"test2@example.com\"))\n\n return mail.get()", "def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)", "def send_reminder(self):\n message_contents = \"This is a reminder that your event: \" + self.event_title + \" takes place on \" + self.event_date + \" in \" + self.event_location\n subject = \"Event Reminder\"\n attendees = self.gameplanuser_set.all()\n for attendee in attendees:\n remindermessage = Message.objects.create(sender=self.event_manager, recipient=attendee, contents=message_contents)\n remindermessage.save()", "def create_text_messages(file_path: str) -> None:\n\n data = load_data(file_path)\n month_name = file_path[file_path.rindex(\"\\\\\", -14) + 1:-4]\n\n messages = create_messages(data, month_name)\n success = send_messages(messages)\n print(success)", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour bookmark import is complete! We've begun processing your bookmarks to\r\nload their page contents and fulltext index them. This process might take a\r\nwhile if you have a large number of bookmarks. Check out your imported\r\nbookmarks at https://bmark.us/{username}/recent.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def send_ctr_alert(date, ctr):\n sender = \"team1_rs@outlook.com\"\n receivers = [\"alexa.hernandez@mail.mcgill.ca\"]\n msg = MIMEText(\n f\"Hello Team1,\\n\\nToday's CTR has dropped below {str(MIN_CTR*100)}%. 
The CTR is {str(ctr*100)}%.\\nPlease \"\n f\"investigate immediately.\"\n )\n\n msg[\"Subject\"] = \"Team1 Recommendation Service - CTR Alert\"\n msg[\"From\"] = sender\n msg[\"To\"] = \";\".join(receivers)\n\n try:\n smtpObj = smtplib.SMTP(\"smtp.office365.com\", 587)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(\"team1_rs@outlook.com\", \"team1*rs\")\n smtpObj.sendmail(sender, receivers, msg.as_string())\n print(\"Successfully sent email\")\n except smtplib.SMTPException as e:\n print(\"Error: unable to send email\")", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nPlease click the link below to activate your account.\r\n\r\n{0}\r\n\r\nWe currently support importing from Google Bookmarks and Delicious exports.\r\nImporting from a Chrome or Firefox export does work, however it reads the\r\nfolder names in as tags. So be aware of that.\r\n\r\nGet the Chrome extension from the Chrome web store:\r\nhttps://chrome.google.com/webstore/detail/knnbmilfpmbmlglpeemajjkelcbaaega\r\n\r\nIf you have any issues feel free to join #bookie on freenode.net or report\r\nthe issue or idea on https://github.com/bookieio/Bookie/issues.\r\n\r\nWe also encourage you to sign up for our mailing list at:\r\nhttps://groups.google.com/forum/#!forum/bookie_bookmarks\r\n\r\nand our Twitter account:\r\nhttp://twitter.com/BookieBmarks\r\n\r\nBookie is open source. Check out the source at:\r\nhttps://github.com/bookieio/Bookie\r\n\r\n---\r\nThe Bookie Team\"\"\".format(message_data)", "def test_template():\n \n # Keywords and values to be filled into the template\n items = {'item_1': 'First', 'long_keyword_item_2': 'Second',\n 'space_3': 'Third Third Third ', 'item_4': 'Fourth',\n 'item_5': None}\n \n sender = 'dummy@moc.org'\n receiver = 'dummy@moc.org'\n result = 'First Second\\nThird Third Third Fourth\\n'\n \n # TEST_DIR = os.path.dirname(os.path.abspath(__file__))\n template = os.path.abspath(os.path.join(TEST_DIR, 'test_template.txt'))\n\n msg = TemplateMessage(sender=sender, email=receiver, template=template,\n **items)\n assert msg.body == result", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px 
solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def notification(self, approver_list):\n dns_name = axops_client.get_dns()\n job_id = self.root_id\n url_to_ui = 'https://{}/app/jobs/job-details/{}'.format(dns_name, job_id)\n service = axops_client.get_service(job_id)\n\n html_payload = \"\"\"\n<html>\n<body>\n <table class=\"email-container\" style=\"font-size: 14px;color: #333;font-family: arial;\">\n <tr>\n <td class=\"msg-content\" style=\"padding: 20px 0px;\">\n The {} job is waiting for your approval. 
The job was triggered by {}.\n </td>\n </tr>\n <tr>\n <td class=\"commit-details\" style=\"padding: 20px 0px;\">\n <table cellspacing=\"0\" style=\"border-left: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;border-top: 1px solid #e3e3e3;\">\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Author</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Repo</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Branch</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Description</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Revision</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n </table>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">VIEW JOB</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">VIEW JOB</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">APPROVE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">APPROVE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center 
style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">DECLINE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">DECLINE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"thank-you\" style=\"padding-top: 20px;line-height: 22px;\">\n Thanks,<br>\n Argo Project\n </td>\n </tr>\n </table>\n</body>\n</html>\n\"\"\"\n\n for user in approver_list:\n\n approve_token, decline_token = self.generate_token(user=user, dns_name=dns_name)\n\n approve_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, approve_token)\n decline_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, decline_token)\n\n msg = {\n 'to': [user],\n 'subject': 'The {} job requires your approval to proceed'.format(service['name']),\n 'body': html_payload.format(service['name'], service['user'],\n service['commit']['author'], service['commit']['repo'],\n service['commit']['branch'], service['commit']['description'], service['commit']['revision'],\n url_to_ui, url_to_ui, approve_link, approve_link, decline_link, decline_link),\n 'html': True\n }\n\n if service['user'] != 'system':\n try:\n user_result = axops_client.get_user(service['user'])\n msg['display_name'] = \"{} {}\".format(user_result['first_name'], user_result['last_name'])\n except Exception as exc:\n logger.error(\"Fail to get user %s\", str(exc))\n\n logger.info('Sending approval requests to %s', str(user))\n result = axsys_client.send_notification(msg)\n\n # TODO: Tianhe adding retry mechanism\n if result.status_code != 200:\n logger.error('Cannot send approval request, %s', result.content)\n sys.exit(1)\n logger.info('Successfully sent approval requests to reviewers.')", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked below.\\n\\n\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "def createThankYouEmail(self):\n result = (\"\\nDear {:s},\\n\\n\"\n \"\\tThank you so much for your generous donation of ${:,.2f}!\\n\\n\"\n \"\\tIt will be put to very good use.\\n\\n\"\n 
\"\\t\\tSincerely,\\n\\t\\t\\t- The Team\".format(self.name, self.getTotDonation())\n )\n return result", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nHello {username}:\r\n\r\nPlease activate your Bookie account by clicking on the following url:\r\n\r\n{url}\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n # lookup = config['pylons.app_globals'].mako_lookup\r\n # template = lookup.get_template(template_file)\r\n\r\n # # template vars are a combo of the obj dict and the extra dict\r\n # template_vars = {'data': message_data}\r\n # return template.render(**template_vars)\r", "def stock_email_blast(stock_dict, notification_time):\n\n with bigbeta_app.app_context():\n print('sending email')\n user_list = build_users_list()\n msg = Message('Big Mover in the Market!',\n sender=email_sender,\n recipients=['jonmbrenner@gmail.com'])\n # recipients=[user_list])\n msg.body = f\"\"\"\\\n!!!HIGH SHORT INTEREST MOVER ALERT!!!\n${stock_dict['ticker']}\nShort Interest: {stock_dict['short_interest']}\nFloat: {stock_dict['free_float']}\nDays to Cover: {stock_dict['dtc']}\nRelative Volume: {stock_dict['rvol']}\nNews Catalysts: {stock_dict['stories']}\n\nLast Price: {stock_dict['last_price']} collected at {cur_tm_log}\nNotification kicked off at {notification_time} EST\n\nGo get it!\n- BigBeta Team\n\"\"\"\n\n mail.send(msg)\n\n return None", "def send_contact_us_receipt_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\",\n \"contact-us-receipt\", \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_html = render_template(\"main/contact-us-receipt/content.html\")\n msg = Message(\n f'[SetNow Support] Re: {data[\"subject\"]}',\n sender=\"setnow@tuta.io\",\n recipients=[data[\"email\"]],\n )\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def delegate_last_day():\n\n regs = Registration.objects.all()\n\n template = 'notifications/last_day_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Schedule and other details'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += 
self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def send_message():\r\n global count\r\n sutasfolder = os.path.join(os.path.expanduser('~'), \"Sutas_Logs\")\r\n slckobj = SlackNotification().slackobj()\r\n slc = slckobj.send_message()\r\n tmsobj = TeamsNotification().teamsobj()\r\n tms = tmsobj.send_message()\r\n globfilepath = os.path.join(expanduser('~'), \"global_conf.yaml\")\r\n globdata = get_data_from_yaml(globfilepath)\r\n if \"logpath\" in os.environ:\r\n # getting the testsuite name from logpath\r\n mailfile = os.path.basename(os.environ['logpath']).split(\r\n str(datetime.datetime.now().year))[0]\r\n # Inside testsuite folder in sutaslogs we are creating a file with\r\n # testsuite name. This file will be used to store the notification\r\n # messages\r\n mailfile = os.path.join(os.path.dirname(os.environ['logpath']),\r\n mailfile)\r\n if os.path.isfile(mailfile):\r\n if count == 0:\r\n os.remove(mailfile)\r\n count = 1\r\n # suitelogpaths file is created in sutaslog folder which is\r\n # in user's home directory.\r\n suitelogpaths = os.path.join(sutasfolder, \"suitelogpaths\")\r\n flag = False\r\n \r\n if globdata.get('Consolidatedmail','no').lower() == 'yes':\r\n mode = \"a\"\r\n else:\r\n mode = \"w\"\r\n # Checks if suitelogpath file already exists.\r\n if os.path.isfile(suitelogpaths):\r\n # checking if the logpath is already in the suitelogpaths file.\r\n # if path exists then continue else writes the path in to file.\r\n with open(suitelogpaths, 'r') as suite:\r\n for line in suite.read().strip().splitlines():\r\n if os.environ['logpath'] in line:\r\n flag = True\r\n if not flag:\r\n with open(suitelogpaths, mode) as suite: \r\n suite.write(os.environ['logpath'])\r\n suite.write('\\n')\r\n else:\r\n # creates suitelogpaths file if doesn't exist and writes\r\n # log path in to it.\r\n with open(suitelogpaths, mode) as suite: \r\n suite.write(os.environ['logpath'])\r\n suite.write('\\n')\r\n #writing notification messages in to a testsuite file which is\r\n #created in testsuite folder.\r\n with open(mailfile, 'a') as agg:\r\n agg.write(os.environ[\"sutasmessages\"])\r\n os.environ[\"sutasmail\"] = os.environ[\"sutasmessages\"]\r\n os.environ[\"sutasmessages\"] = \"\"\r\n msgs = {\"slack\": slc, \"teams\": tms}\r\n if slc != \"success\" or tms != \"success\":\r\n return msgs\r\n else:\r\n return \"success\"", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def generate_notice_email(notice):\n subject = notice.subject\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = settings.EMAIL_TARGET_S\n to_email = notice.email_to.email\n\n context = {'object': notice}\n\n cont_html = render_to_string('emails/email_notice.html', context)\n cont_text = 
render_to_string('emails/email_notice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, [to_email], reply_to=[reply_to_email])\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def send_notification(self):\n # Sending the notification\n tbs = TestBuild.objects.filter(pk__in = self.create_builds_list)\n tbs = tbs.order_by('product')\n\n tbp_pks = list(set(tbs.values_list('product', flat=True)))\n ps = Product.objects.filter(pk__in = tbp_pks)\n\n message = MAIL_HEADER\n\n line = '=' * 30 + '\\n'\n\n for p in ps:\n p_str = unicode(p)\n\n message += line + p_str + '\\n' + line\n for tb in tbs:\n if tb.product == p:\n message += '* ' + unicode(tb) + '\\n'\n message += '\\n'\n\n mail_to = []\n for admin in ADMINS:\n mail_to.append(admin[1])\n\n send_mail(MAIL_SUBJECT, message, MAIL_FROM, mail_to)", "def template_message(include_title=False, template='markdown.md.j2', exclude_labels=True, current_length=0, **kwargs):\n processed = {'message': ''}\n alerts_count = len(kwargs['alerts'])\n title = f\"{alerts_count} alert(s) received\"\n if not include_title:\n processed.update({'title': f\"{title}\"})\n title = None\n processed['message'] = render_template(\n template,\n title=title,\n alerts=kwargs['alerts'],\n external_url=kwargs['external_url'],\n receiver=kwargs['receiver'],\n exclude_labels=exclude_labels,\n current_length=current_length,\n )\n for alert in kwargs['alerts']:\n if int(alert['annotations'].get('priority', -1)) > processed.get('priority', -1):\n processed['priority'] = int(alert['annotations']['priority'])\n return processed", "def sendNotification(self):\n if not(self.errors or self.accounting):\n return S_OK()\n\n emailBody = \"\"\n rows = []\n for instanceName, val in self.accounting.iteritems():\n rows.append([[instanceName],\n [val.get('Treatment', 'No Treatment')],\n [str(val.get('LogAge', 'Not Relevant'))]])\n\n if rows:\n columns = [\"Instance\", \"Treatment\", \"Log File Age (Minutes)\"]\n emailBody += printTable(columns, rows, printOut=False, numbering=False, columnSeparator=' | ')\n\n if self.errors:\n emailBody += \"\\n\\nErrors:\"\n emailBody += \"\\n\".join(self.errors)\n\n self.log.notice(\"Sending Email:\\n\" + emailBody)\n for address in self.addressTo:\n res = self.nClient.sendMail(address, self.emailSubject, emailBody, self.addressFrom, localAttempt=False)\n if not res['OK']:\n self.log.error(\"Failure to send Email notification to \", address)\n continue\n\n self.errors = []\n self.accounting.clear()\n\n return S_OK()", "def __str__(self):\n email_template = '\\n'.join((f'\\n\\nDear {self._full_name},\\n',\n f'Thank you for your very kind donation of ${self.last_donation:.2f}.\\n',\n 'It will be put to very good use.\\n',\n ' Sincerely,',\n ' -The Team\\n'))\n return email_template", "def notify_email(kwargs):\n SMTP_mail_secret_name = \"\" # setting up your AWS secret name\n email_creds = aws.get_secret(SMTP_mail_secret_name, '[regoin]') # setting the regoin to credentials\n emailfrom = email_creds['accountname']\n emailsto = ['[mail receiver]'] # setting up mail receiver\n emailscc = ['[mail cc ]'] # setting up mail cc\n print(f\"Sender: {emailfrom}\")\n\n username = email_creds['username']\n password = email_creds['password']\n server = email_creds['server']\n print(f\"Server: {server}\")\n\n \"\"\"Send custom email alerts.\"\"\"\n print(\"kwargs >>>> \", kwargs)\n ti = kwargs['ti']\n dag_run = kwargs['dag_run']\n var = kwargs['var']['json']\n params = kwargs['params']\n print(f\"ti: {ti}\")\n 
print(f\"dag_run: {dag_run}\")\n\n ### Get exception then parsing it\n if kwargs.get('exception') is not None and type(kwargs.get('exception')) == list:\n dh_excpt = \"During handling of the above exception, another exception occurred:\"\n matching_main = [s for s in kwargs['exception'] if \"/main.py\" in s]\n print(\"matching_main >>>> \", matching_main)\n \n if matching_main != []:\n matching_fist_text = matching_main[0]\n print(\"matching_fist_text >>>> \", matching_fist_text)\n matching_fist_index = kwargs['exception'].index(matching_fist_text)\n print(\"matching_fist_index >>>> \", matching_fist_index)\n\n matching_last_text = matching_main[-1]\n print(\"matching_last_text >>>> \", matching_last_text)\n matching_last_index = kwargs['exception'].index(matching_last_text)\n print(\"matching_last_index >>>> \", matching_last_index)\n\n if dh_excpt in kwargs['exception']:\n dhe_index = kwargs['exception'].index(dh_excpt)\n print(\"The index of dhe >>>> \", dhe_index)\n\n if matching_fist_index < dhe_index:\n # when \"/main.py\" first show before \"During handling...\" then remove after \"During handling...\" text until the end\n kwargs['exception'][dhe_index:] = []\n elif matching_fist_index > dhe_index:\n # when \"/main.py\" first show after \"During handling...\" then remove after another text until the end\n kwargs['exception'][matching_last_index+2:] = []\n\n formatted_exception = \"\\n\".join(kwargs['exception'])\n print(f\"formatted_exception: {formatted_exception}\")\n elif kwargs.get('exception') is not None: \n formatted_exception = kwargs['exception']\n print(f\"formatted_exception: {formatted_exception}\")\n\n title = ''\n body = ''\n print(\"dag_run.run_id >>>> \", dag_run.run_id)\n print(\"ti.task_id >>>> \", ti.task_id)\n print(\"ti.state >>>> \", ti.state)\n\n print(\"When ti.state == State.FAILED >>>> \") # ti.state == State.FAILED as same as ti.state == 'failed'\n title = f\"[TEST] Airflow alert: ({dag_run.run_id}) failed on ({ti.task_id})\"\n body = f\"Dears, \\n\\n\\n\" + \\\n f\"The job_id ({dag_run.run_id}) failed on ({ti.task_id}). \\n\" + \\\n f\"Check what goes wrong, the ERROR message is shown as below: \\n\\n\" + \\\n f\"{formatted_exception} \\n\\n\" + \\\n f\"Forever yours, \\n\" + \\\n f\"RDP Data Team\"\n print(\"check title >>>> \\n\", title)\n print(\"check body >>>> \\n\", body)\n print(f\"Prepare to send out the mail...\\n\\t\\tsubject: {title}\") \n se.email(emailfrom, emailsto, emailscc, username, password, server, body, subject = title)\n print(\"The email send out done.\")\n raise AirflowException(f\"AirflowException: Pleaes check what goes wrong this job_id ({dag_run.run_id}) failed on ({ti.task_id}).\")", "def send_new_email(user):\n token = user.get_token()\n message = Message(\n 'Verify Your New Email',\n sender='storcwebsite@gmail.com',\n recipients=[user.temp_email])\n message.body = f\"The email address associated with your Storc \" \\\n f\"account has changed.\\n\\nTo verify your new email address, \" \\\n f\"please click the link below:\\n\\n\" \\\n f\"{url_for('users.new_email', token=token, _external=True)}\"\n mail.send(message)", "def send_warning(self):\n\n # Check whether all the necessary parameters for SMS are present\n if self.your_phone != '' and self.twilio_phone != '' and self.account_sid != '' and self.auth_token != '':\n client = Client(self.account_sid, self.auth_token)\n\n try:\n sms = client.messages.create(\n body=\"\"\"Last will: It was at least 30 days since your last check in. 
\n This is a reminder to check in in the next 24 hours.\"\"\",\n from_=self.twilio_phone,\n to=self.your_phone)\n sms\n print(\"\\nSMS sent\")\n except Exception as e:\n print(f\"An error occurred while trying to send the SMS. Error: {e}\")\n\n else:\n print(\"\\nMissing SMS parameters. SMS not sent\")\n\n # Check whether all the necessary parameters for email are present\n if self.sender_name != '' and self.recipient_email != '' and self.email != '' and self.email_pwd != '':\n message = f\"\"\"It has been at least 30 days since you last checked in. \nYou need to check in in the next 24 hours.\\n\nOtherwise at {self.deadline} the email with the important info will be sent to the designated recipient.\\n\nIn order to reset simply go to the working directory and run python3 last_will.py\"\"\"\n\n # send_email will return 0 if everything went ok, otherwise it will return an error message\n status = send_email(self.sender_name, self.your_email,\n self.email, self.email_pwd,\n subject='Last will: Reminder to check in', unencrypted_message=message)\n\n if status != 0:\n print(status)\n exit(1)\n else:\n print(\"Email sent\\n\")\n\n print(f\"You have until {self.deadline} to check in. \"\n f\"In order to do that simply go to the working directory and run ./last_will.sh\\n\")\n else:\n print(\"Missing email parameters. Email not sent.\\n\")\n exit(1)", "def update_helpdesk(self, data):\n self.sr=data\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else: \n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n msg['Subject'] = 'Your Request SR# %s for VM provisioning \\\n reported failure for product %s' % \\\n\t\t\t ( self.sr['requestNumber'], pdict[self.data['groupid']] )\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve \\\n the following problem and notify infra team.\")\n nHtml.append(\"VM creation readiness from vCAC cloud \\\n is reported failure, \\\n Product is <b>%s</b> is stuck.\" \\\n % pdict[self.data['groupid']])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is impacted.<br><br>\" % pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for SR# \\\n related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: \\\n <a href=%s> Effected Build \\\n Console</a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: 
This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + \\\n msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"aladinshixi@gmail.com\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"aladinshixi@gmail.com\", \"aladinshixi@gmail.com\", msg)\n\n # smtp.close()\n return False", "def send_feedback_message_email(recipient_id, feedback_messages):\n email_subject_template = (\n 'You\\'ve received %s new message%s on your explorations')\n\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n 'You\\'ve received %s new message%s on your Oppia explorations:<br>'\n '<ul>%s</ul>'\n 'You can view and reply to your messages from your '\n '<a href=\"https://www.oppia.org/creator_dashboard\">dashboard</a>.'\n '<br>'\n '<br>Thanks, and happy teaching!<br>'\n '<br>'\n 'Best wishes,<br>'\n 'The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:\n log_new_error('This app cannot send feedback message emails to users.')\n return\n\n if not feedback_messages:\n return\n\n recipient_user_settings = user_services.get_user_settings(recipient_id)\n\n messages_html = ''\n count_messages = 0\n for exp_id, reference in feedback_messages.iteritems():\n messages_html += (\n '<li><a href=\"https://www.oppia.org/create/%s#/feedback\">'\n '%s</a>:<br><ul>' % (exp_id, reference['title']))\n for message in reference['messages']:\n messages_html += ('<li>%s<br></li>' % message)\n count_messages += 1\n messages_html += '</ul></li>'\n\n email_subject = email_subject_template % (\n (count_messages, 's') if count_messages > 1 else ('a', ''))\n\n email_body = email_body_template % (\n recipient_user_settings.username, count_messages if count_messages > 1\n else 'a', 's' if count_messages > 1 else '', messages_html,\n EMAIL_FOOTER.value)\n\n _send_email(\n recipient_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def create_email_body_for_update_resources(results):\n\n failures = [url for url in results if results[url][\"state\"] == \"Failed\"]\n warnings = [url for url in results if results[url][\"state\"] == \"Warning\"]\n successes = [url for url in results if results[url][\"state\"] == \"Succeeded\"]\n\n body, html_content = \"\", \"\"\n\n # Failed\n if failures:\n body += f\"Failed [{len(failures)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(failures)}]</h2>\\n\\n\"\n\n for url in failures:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: red;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Warnings\n if warnings:\n body += f\"Warnings [{len(warnings)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(warnings)}]</h2>\\n\\n\"\n\n for url in warnings:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 
style=\"color: orange;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Succeeded\n if successes:\n body += f\"Succeeded [{len(successes)}]\\n\\n\"\n html_content += f\"<h2>Succeeded [{len(successes)}]</h2>\\n\"\n\n for url in successes:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: green;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n body_html = f\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Updated BEL Resources for {settings.HOST_NAME}</title>\n </head>\n <body>\n <div id=\"content\">{html_content}</div>\n </body>\n</html>\n \"\"\"\n\n return (body, body_html)", "def send_notification(self, context):\n subject = \"Order placed for %s [%s]\" % (context['product']['name'], context['name'])\n message = render_to_string('notification/product_notification.txt',\n context)\n try:\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,\n self.get_recipients())\n except SMTPException as e:\n logger.error(\"Error sending notification: %s\" % e)\n else:\n logger.info(\"Sent notification for %s [%s]\" % (context['product']['name'], context['name']))", "def email_body_new_proposal_notification_to_seller(meeting, buyer_name, buyer_profile_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr><td align=\"center\" valign=\"top\">\\n\\t</td></tr>\\n</tbody>\\n</table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody><tr>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" />'\n\tmsg = msg + '\\n\\t\\t\\t\\t</a>'\n\tmsg = msg + '\\n\\t\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\tGreat! 
You received a new proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + buyer_profile_id + '\\\" style=\"color:#1488CC\">'+ buyer_name + '</a>.'\n\tmsg = msg + '\\n\\t\\t\\t\\t<br><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDescription: ' + meeting.get_description_html() + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:150px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.accept_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Accept</a> '\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.reject_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #e55e62;font-size: 14px;border-radius: 3px;border: 1px solid #e55e62;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center\" target=\"_blank\">Reject</a> '\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n</tr></tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def get_email():\n return Email(\n subject='[Messages] Integration Test',\n body='Conducting Integration Testing',\n attachments=str(TESTDIR.joinpath('file2.png')))", "def send_notification(to_number):\n client.messages.create(to=to_number,\n from_=config.get('TWILIO', 'twilio_from_number'),\n body=f'RTX 3090 FE has been added to your Best Buy cart')", "def send_swarms_list_email(message, ref):\n swarm_list_email = 
settings.DEFAULT_SWARMS_EMAIL\n contact_email = settings.DEFAULT_FROM_EMAIL\n subject = render_to_string(\n 'contact/swarm_emails/new_swarm_subject.txt',\n {'message': message, 'ref': ref})\n body = render_to_string(\n 'contact/swarm_emails/new_swarm_body.txt',\n {'message': message, 'contact_email': contact_email})\n\n send_mail(\n subject,\n body,\n settings.DEFAULT_FROM_EMAIL,\n [swarm_list_email]\n )", "def _send_notification() -> None:\n send_notification(\n self,\n \"slack:@aaron\",\n \"New {0} Version: {1}\".format(\n self.properties[CONF_APP_NAME], new_version\n ),\n title=\"New Software 💿\",\n )", "def send_now(users, label, extra_context=None, on_site=True):\r\n if extra_context is None:\r\n extra_context = {}\r\n \r\n notice_type = NoticeType.objects.get(label=label)\r\n\r\n current_site = Site.objects.get_current()\r\n notices_url = u\"http://%s%s\" % (\r\n unicode(current_site),\r\n reverse(\"notification_notices\"),\r\n )\r\n\r\n current_language = get_language()\r\n\r\n formats = (\r\n 'short.txt',\r\n 'full.txt',\r\n 'notice.html',\r\n 'full.html',\r\n ) # TODO make formats configurable\r\n\r\n for user in users:\r\n recipients = []\r\n # get user language for user from language store defined in\r\n # NOTIFICATION_LANGUAGE_MODULE setting\r\n try:\r\n language = get_notification_language(user)\r\n except LanguageStoreNotAvailable:\r\n language = None\r\n\r\n if language is not None:\r\n # activate the user's language\r\n activate(language)\r\n\r\n # update context with user specific translations\r\n context = Context({\r\n \"user\": user,\r\n \"notice\": ugettext(notice_type.display),\r\n \"notices_url\": notices_url,\r\n \"current_site\": current_site,\r\n })\r\n context.update(extra_context)\r\n\r\n # get prerendered format messages\r\n messages = get_formatted_messages(formats, label, context)\r\n\r\n # Strip newlines from subject\r\n subject = ''.join(render_to_string('notification/email_subject.txt', {\r\n 'message': messages['short.txt'],\r\n }, context).splitlines())\r\n\r\n body = render_to_string('notification/email_body.txt', {\r\n 'message': messages['full.txt'],\r\n }, context)\r\n\r\n notice = Notice.objects.create(user=user, message=messages['notice.html'],\r\n notice_type=notice_type, on_site=on_site)\r\n if should_send(user, notice_type, \"1\") and user.email: # Email\r\n recipients.append(user.email)\r\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)\r\n\r\n # reset environment to original language\r\n activate(current_language)", "def get_notification_template(self):\n if self.db_config_file.key_exists(\"notification_template_file\"):\n filename = self.db_config_file_value(\"notification_template_file\").strip('\"')\n return open(filename, 'rt').read()\n\n return get_data(\"asebackupcli\", \"notification.json\")", "def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + 
'\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way&mdash;write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def sendNotificationEmail(fundName, status, message):\n\tgetSubject = lambda fundName, status: \\\n\t\tfundName + ' auto update succesful' \\\n\t\tif status == Constants.STATUS_SUCCESS else \\\n\t\tfundName + ' auto update failed'\n\n\tlogger.debug('sendNotificationEmail(): {0}'.format(fundName))\n\tsendMail( message\n\t\t\t, getSubject(fundName, status)\n\t\t\t, getMailSender()\n\t\t\t, getNotificationMailRecipients()\n\t\t\t, getMailServer()\n\t\t\t, getMailTimeout())", "def render_message(template_name, extra_context={}):\n mail_text = _render_mail_template(template_name, extra_context)\n rendered_mail = mail_text.replace(u\"\\r\\n\", u\"\\n\").replace(u\"\\r\", u\"\\n\").split(u\"\\n\")\n return 
rendered_mail[0], \"\\n\".join(rendered_mail[1:])", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def send_email(\n template_name: str,\n to: typing.Union[str, typing.List[str]],\n personalisation: dict = None,\n reference: str = None,\n staff_email: bool = None,\n retry_attempts: int = 2,\n spoolable_ctx: Context = None,\n):\n client = NotifyClient.shared_client()\n try:\n client.send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n )\n except APIError as e:\n should_retry = (\n # no retry without uWSGI spooler\n spoolable_ctx.spooled\n # no retry if run out of retry attempts\n and retry_attempts\n # retry only for \"service unavailable\" / \"internal error\" statuses\n and 500 <= e.status_code < 600\n # …unless it was caused by an invalid json response\n and not isinstance(e, InvalidResponse)\n )\n if should_retry:\n send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n retry_attempts=retry_attempts - 1,\n )\n else:\n raise e", "def compose_email(self, donation_id=-1, write_totals=False):\n if not write_totals:\n amount = self.donations[donation_id]\n else:\n amount = self.total_donations\n email_string = f\"\\nDear {self.name},\\n Thank you for your generous\\\n gift of ${amount:.2f}! 
It will help Local Charity achieve our mission.\\n\\\n Best regards,\\n\\\n Local Charity\\n\\n\"\n return email_string", "def get_mail( traceback ):\n msg = MIMEText( traceback )\n msg[ 'Subject' ] = Header( 'FX daily cron error' )\n msg[ 'From' ] = 'FX daily cron'\n msg[ 'To' ] = 'tamakoshihiroki@gmail.com'\n msg[ 'Date' ] = formatdate( localtime = 9 )\n msg[ 'Content-Type' ] = ''.join(\n [ 'text/plain; charset=\"', BODY_ENCODING, '\"', ] )\n return msg", "def notification_email(self, sender, subject, body):\n\n\t\tts = str(int(time.time())*1000)\n\t\tparts = [sender, body, ts, subject]\n\t\tself._send_message(\"NOTIFICATION\", self._pack_message_data(0, parts))", "def notification_to_file(filename, notification):\n outfile = open(filename, 'a')\n timestamp = time.localtime(time.time())\n for data in timestamp:\n outfile.write(str(data) + ' ') \n outfile.write(notification + '\\n')\n outfile.close()", "def send_notification(semester):\n\n\tfrom University import Student\n\t#query to retrieve students\n\tstudents = Student.query.filter_by(sem = semester).all()\n\t#keep only the email-id of the students\n\tstudents = list(map(lambda x : x.email, students))\n\t#print('sending Message', students)\n\tmsg = Message('Meeting Schedule Notification.',\n\t\t\tsender = 'pesfacultyadvisor.sepro2017@gmail.com',\n\t\t\trecipients = students)\n\t#print('Object created!')\n\tmsg.body = \"Dear Student\\n A meeting is scheduled on so and so date.\\n We request you to attend the meeting.\"\n\tmail.send(msg)\n\n\treturn \"Notification Sent!!\"", "def read_template():\n\n text_msg = \"\"\"${PERSON_NAME} - Calling Campaign Summary - ${DATE}:\\n\n Total Called = ${TOTAL_CALLED}\\n\n Answered = ${ANSWERED}\\n\n Not Answered = ${NOT_ANSWERED}\\n\n Declines = ${DECLINES}\\n\n Remaining = ${REMAINING}\\n\n \\n\n Thank You.\"\"\"\n\n return Template(text_msg)", "def send_contact_notification():\n logging.info(\"Mail sending..\")\n notifications = Notification.query.filter_by(email_sent=False, user_notification=True).all()\n count = 0\n for notification in notifications:\n user_id = notification.user_id\n # fetch user mail from User service\n try:\n # print('request to:',f\"http://{os.environ.get('GOS_USER')}/user?id={user_id}\")\n resp = requests.get(f\"http://{os.environ.get('GOS_USER')}/user?id={user_id}\")\n if resp.status_code != 200:\n logging.error(f\"[{resp.status_code}] Mail task, User service replied with error {resp.json()}\")\n continue\n email = resp.json()['email']\n except Exception as e:\n # if user requests fails, we'll try to send email at next task trigger\n logging.error(e)\n continue\n if email is not None and email.strip() != '':\n # send email\n date = notification.date.strftime('%Y-%m-%d at %H:%M')\n template = env.get_template('./mail_notification.html')\n output = template.render(dest=resp.json(), date=date)\n pos_outcome = send_email(email, output)\n if pos_outcome:\n notification.email_sent = True\n db.session.commit()\n logging.info(f\"Email to {email} just sent\")\n count += 1\n else:\n logging.error(f\"Error while sending email to {email}\")\n\n logging.info(f'{count} email(s) sent')", "def notify_event_participant_application(request, user, registered_user, event):\n\n subject = f\"{settings.SITE_NAME} {event.title} New Participant Registration\"\n context = {\n 'name': user.get_full_name(),\n 'registered_user_name': registered_user.get_full_name(),\n 'url_prefix': get_url_prefix(request),\n 'events_home': reverse('event_home'),\n 'event_title': event.title,\n 'SITE_NAME': 
settings.SITE_NAME,\n }\n body = loader.render_to_string('events/email/event_registration.html', context)\n # Not resend the email if there was an integrity error\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [user.email], fail_silently=False)", "def mail_template(self, template_name, send_to=None, user = None, event_title=\"\", **kwargs):\n barcamp = kwargs.get('barcamp')\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n if barcamp is not None:\n subject = barcamp.mail_templates['%s_subject' %template_name]\n tmpl = jinja2.Template(barcamp.mail_templates['%s_text' %template_name])\n kwargs['fullname'] = user.fullname\n payload = tmpl.render(**kwargs)\n payload = payload.replace('((fullname))', user.fullname)\n payload = payload.replace('((event_title))', event_title)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_welcome_email(username: str, email: str) -> None:\n\n # TODO ...\n # Load html templates and get the content from it.\n # html_content = ...\n\n content = f\"<h1>Welcome to app, {username}</h1>\"\n email = sender.create_email(\n to_list=[email],\n subject=f\"Welcome from {{ app }}\",\n html_content=content,\n )\n sender.send_email(email_to_send=email)", "def task_rescheduled_notify(name, attempts, last_error, date_time, task_name, task_params):\n body = loader.render_to_string(\n 'notification/email/notify_rescheduled_task.html', {\n 'name': name,\n 'attempts': attempts,\n 'last_error': last_error,\n 'date_time': date_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'task_name': task_name,\n 'task_params': task_params,\n 'signature': settings.EMAIL_SIGNATURE\n })\n subject = name + \" has been rescheduled\"\n mail_admins(subject, body, settings.DEFAULT_FROM_EMAIL)", "def payment_instructions_email_notification(sender, **kwargs):\n subject_template_name = \\\n 'shop_simplenotifications/payment_instructions_subject.txt'\n body_text_template_name = \\\n 'shop_simplenotifications/payment_instructions_body.txt'\n body_html_template_name = \\\n 'shop_simplenotifications/payment_instructions_body.html'\n \n request = kwargs.get('request')\n order = kwargs.get('order')\n \n emails = []\n if order.user and order.user.email: \n emails.append(order.user.email)\n if request and get_billing_address_from_request(request):\n address = get_billing_address_from_request(request)\n if hasattr(address, 'email'):\n emails.append(address.email)\n emails = list(set(emails)) # removes duplicated entries\n if emails:\n subject = loader.render_to_string(\n subject_template_name,\n RequestContext(request, {'order': order})\n )\n subject = subject.join(subject.splitlines())\n\n text_content = loader.render_to_string(\n body_text_template_name,\n RequestContext(request, {'order': order})\n )\n\n try:\n html_content = loader.render_to_string(\n body_html_template_name,\n RequestContext(request, {'order': order})\n )\n except TemplateDoesNotExist:\n html_content = None\n\n from_email = getattr(settings, 'SN_FROM_EMAIL',\n settings.DEFAULT_FROM_EMAIL)\n\n message = EmailMultiAlternatives(subject, text_content, from_email,\n emails)\n if html_content:\n message.attach_alternative(html_content, \"text/html\")\n message.send()", "def build_message():\n outgoing_mail = Mail()\n outgoing_mail.from_email = Email(email_from_address, email_from_name)\n outgoing_mail.subject = subject\n personalization = Personalization()\n for recipient in email_to_addresses:\n personalization.add_to(Email(recipient))\n 
outgoing_mail.add_personalization(personalization)\n outgoing_mail.add_content(Content(\"text/plain\", str.join('\\n', _log)))\n outgoing_mail.add_content(Content(\"text/html\", \"<html><body> {} </body></html>\".format(str.join(' <br /> ', _log))))\n return outgoing_mail.get()", "def storage_request_notify(request, project):\n subject = 'Storage request received: {0}'.format(\n project.title)\n\n content = {'project': project,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME}\n\n content['name'] = \"Colleague\"\n body = loader.render_to_string(\n 'notification/email/storage_request_notify_team.html', content)\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.CONTACT_EMAIL], fail_silently=False)", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "def send_application_submitted_notification(application):\n candidate_name = application.candidate_name\n if application.authorized_email is not None:\n candidate_email = application.authorized_email\n else:\n candidate_email = application.questionnaire.candidate_email\n\n group_name = application.group.name\n group_email = application.rep_email\n\n cc_emails = [\n '\"%s\" <%s>' % (candidate_name, candidate_email),\n '\"%s\" <%s>' % (\n 'Our Revolution Electoral Coordinator',\n ELECTORAL_COORDINATOR_EMAIL\n ),\n ]\n from_email = 'Our Revolution <%s>' % DEFAULT_FROM_EMAIL\n to_email = [\n # Use double quotes for group name\n '\"%s\" <%s>' % (group_name, group_email),\n ]\n\n subject = \"\"\"\n Your nomination for %s has been submitted! 
Here are the next steps.\n \"\"\" % candidate_name\n\n d = {\n 'or_logo_secondary': OR_LOGO_SECONDARY,\n 'group_name': group_name,\n 'candidate_name': candidate_name\n }\n\n html_template = get_template('email/application_submit_email.html')\n html_content = html_template.render(d)\n text_template = get_template('email/application_submit_email.txt')\n text_content = text_template.render(d)\n\n msg = EmailMultiAlternatives(\n subject,\n text_content,\n from_email,\n to_email,\n cc=cc_emails\n )\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending notification in Slack\")", "def do_create(service,summary,description,startday,\\\n starttime,endtime,username,email):\n event = {\n 'summary': 'Code Clinic: {}'.format(summary),\n 'description': '{}.'.format(description),\n 'start': {\n 'dateTime': '{}T{}:00'.format(startday, starttime),\n 'timeZone': 'GMT+02',\n },\n 'end': {\n 'dateTime': '{}T{}:00'.format(startday,endtime),\n 'timeZone': 'GMT+02',\n },\n 'recurrence': [\n 'RRULE:FREQ=DAILY;COUNT=1'\n ],\n 'attendees': [\n {\n 'displayName': username,\n 'email': email,\n 'optional': True,\n 'comment': 'Creator',\n 'responseStatus': 'accepted',\n },\n ],\n 'anyoneCanAddSelf': True,\n\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n event = service.events().insert(calendarId='primary', body=event,\\\n sendUpdates='all').execute()\n\n return event", "def replyMessage(_email, _name):\n\n _mailer = app.config['MAIL_USERNAME']\n mesg = Message(\"Message Received\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[_email])\n mesg.body = f'''Hello {_name},\nThe message you sent to Randy has been received. \nRandy will contact you within 24 hours.\nThank you.\n\nRegards,\nRandy\n\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(mesg)\n return 'OK'", "def create_pubsub_notification(context, depends_on, status_string):\n\n return [{\n 'name': 'pubsub-notification-{}'.format(status_string),\n 'action': 'gcp-types/pubsub-v1:pubsub.projects.topics.publish',\n 'properties': {\n 'topic':\n context.properties['pubsubTopic'],\n 'messages': [{\n 'attributes': {\n 'projectId': context.properties['projectId'],\n 'status': status_string,\n }\n }]\n },\n 'metadata': {\n # The notification should only run after *all* project-related\n # resources have been deployed.\n 'dependsOn': depends_on,\n # Only trigger the pubsub message when the deployment is created (not on\n # update or delete).\n 'runtimePolicy': ['UPDATE_ALWAYS'],\n },\n }]", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour import has failed. The error is listed below. Please file a bug at\r\nhttps://github.com/bookieio/bookie/issues if this error continues. 
You may\r\nalso join #bookie on freenode irc if you wish to aid in debugging the issue.\r\nIf the error pertains to a specific bookmark in your import file you might try\r\nremoving it and importing the file again.\r\n\r\nError\r\n----------\r\n\r\n{exc}\r\n\r\nA copy of this error has been logged and will be looked at.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):\n\n creator_name = user_services.get_username(creator_id)\n email_subject = ('%s has published a new exploration!' % creator_name)\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n '%s has published a new exploration! You can play it here: '\n '<a href=\"https://www.oppia.org/explore/%s\">%s</a><br>'\n '<br>'\n 'Thanks, and happy learning!<br>'\n '<br>'\n 'Best wishes,<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:\n log_new_error('This app cannot send subscription emails to users.')\n return\n\n recipient_list = subscription_services.get_all_subscribers_of_creator(\n creator_id)\n recipients_usernames = user_services.get_usernames(recipient_list)\n recipients_preferences = user_services.get_users_email_preferences(\n recipient_list)\n for index, username in enumerate(recipients_usernames):\n if recipients_preferences[index].can_receive_subscription_email:\n email_body = email_body_template % (\n username, creator_name, exploration_id,\n exploration_title, EMAIL_FOOTER.value)\n _send_email(\n recipient_list[index], feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)" ]
[ "0.7332573", "0.7300195", "0.70396394", "0.65700346", "0.6381602", "0.63333446", "0.6274574", "0.62374425", "0.61783296", "0.61655205", "0.6151091", "0.61510384", "0.6135396", "0.6120886", "0.61056536", "0.60524446", "0.6039856", "0.6035018", "0.6004501", "0.5985513", "0.59728086", "0.5951617", "0.59239763", "0.5875435", "0.5868155", "0.5837783", "0.58039975", "0.57940316", "0.5793894", "0.5788486", "0.57722247", "0.57711476", "0.57701385", "0.5768348", "0.57610035", "0.5751811", "0.5749164", "0.57456523", "0.5744173", "0.57423204", "0.57322633", "0.57231754", "0.5719516", "0.57127637", "0.5708205", "0.5706581", "0.5693427", "0.5689232", "0.5689031", "0.56812084", "0.5680418", "0.56758636", "0.56684285", "0.56645435", "0.56593275", "0.5649221", "0.56284297", "0.56222653", "0.56213534", "0.5618784", "0.56159675", "0.5613054", "0.5588232", "0.558425", "0.55739343", "0.5571493", "0.5566473", "0.556605", "0.55505925", "0.5546588", "0.55355096", "0.55246603", "0.5523578", "0.5518273", "0.5514211", "0.5511709", "0.55112904", "0.5509026", "0.55003846", "0.54966795", "0.54950327", "0.549397", "0.54917467", "0.54875076", "0.5480942", "0.5473452", "0.5464703", "0.5464223", "0.54635465", "0.54627985", "0.5453797", "0.5452399", "0.54495686", "0.5441687", "0.5440738", "0.542318", "0.5420897", "0.5410087", "0.53949624", "0.5391865" ]
0.74509716
0
Create a formatted email message to send to a worker user using template alerts/service_notification.txt
def alert_service_notification(user, service):

    message = loader.get_template(
        'alerts/service_notification.txt').render(
        {'user': user, 'service': service})

    return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def alert_new_service_notification(hirer, worker, service):\n\n domain = Site.objects.get_current().domain\n url = \"http://\" + domain + \"/worker/\"\n\n message = loader.get_template(\n 'alerts/new_service_notification.txt').render(\n {'worker': worker, 'hirer': hirer, 'service': service, 'url':url})\n\n return message", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def generateNotifyMessage(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n today = date.today()\n current_date = today.strftime(\"%B %d, %Y\")\n\n subject = \"Progam operating warning - Not Running\"\n body = \"Since \" + current_date + \" at \" + current_time \n msg = f'Subject: {subject} \\n\\n{body}'\n return msg", "def generate_selfservice_notice_email(context):\n subject = \"Self Service Form Submission\"\n from_email = settings.DEFAULT_FROM_ADDR\n to_email = [settings.EMAIL_TARGET_W, settings.EMAIL_TARGET_VP]\n\n cont_html = render_to_string('emails/email_selfservice.html', context)\n cont_text = render_to_string('emails/email_selfservice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, to_email)\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def alert_subscription_message(request, user):\n message = loader.get_template(\n 'alerts/subscription_message.txt').render(\n {'user': user, 'evaluation_link': resolve_worker_evaluation_url(request, user)})\n\n return message", "def test_email_content():\n\n time_of_day = alerts.current_time()\n hostname = alerts.host_name()\n\n subject = \"Subject: Raspi-Sump Email Test\"\n message = \"Raspi-Sump Test Email\"\n\n return \"\\r\\n\".join(\n (\n f\"From: {configs['email_from']}\",\n f\"To: {configs['email_to']}\",\n f\"{subject}\",\n \"\",\n f\"{hostname} - {time_of_day} - {message}.\",\n )\n )", "def notify(template_name, context):\n to_address = context['to_address']\n template_name = 'emails/' + template_name\n subject_template = get_template(\n template_name + '_subject.html')\n body_template = get_template(template_name + '_body.html')\n context = Context(context)\n # Strip, otherwise we get header errors.\n subject = subject_template.render(context).strip()\n body = body_template.render(context)\n try:\n sent = send_mail(subject, body, FROM_ADDRESS, [to_address])\n except Exception:\n pass\n\n status = 's' if sent else 'e'\n Notification.objects.create(\n from_address=FROM_ADDRESS,\n to_address=to_address,\n subject=subject,\n body=body,\n status=status,\n )", "def send_welcome_email(user):\n\n register.customise_auth_messages()\n auth_messages = current.auth.messages\n\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n # Look up CMS template for welcome email\n db = 
current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessage\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email", "def format_mail(template: str, event: dict, ishtml: bool) -> str:\n header = \"Someone filled the contact form\"\n subtext = \"\"\n # uuid.uuid4().hex\n unsubscribe_key = \"f4bd5dd85908487b904ea189fb81e753\" # Not actually applicable for Admin email ID\n keys = ['firstName', 'lastName', 'email', 'subject', 'message']\n for key in keys:\n if ishtml:\n value = html.escape(event[key]).replace('\\n', '<br/>')\n subtext += \"{}: {}<br>\".format(key, value)\n else:\n subtext += \"{}: {}\\n\".format(key, event[key]).replace('\\n', '\\r\\n')\n template = template.replace('{{header}}', header)\n template = template.replace('{{subtext}}', subtext)\n template = template.replace('{{unsubscribe-key}}', unsubscribe_key)\n return template", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def send_welcome_email(cls, user):\n\n cls.customise_auth_messages()\n auth_messages = current.auth.messages\n\n # Look up CMS template for welcome email\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessageInvited\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = 
db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email", "def generate_web_service_email(details):\n subject = details[\"subject\"]\n body = details[\"message\"]\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = [settings.EMAIL_TARGET_W]\n to_email = details[\"email_to\"]\n\n email = GenericEmailGenerator(subject=subject, to_emails=to_email, bcc=reply_to_email, from_email=from_email,\n reply_to=reply_to_email, body=body, context={'mrkdwn': True})\n\n return email", "def notify(nt_id, application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))", "def create_email(username, provider):\n print(f\"Your new email is {username}@{provider}.com\")", "def get_first_trial_communication_email(account):\n\n SUBJECT = 'Foojal: First couple of days'\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your first few days of Foojal.com.\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nYour Team:\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nThe import for user {username} has failed to import. 
The path to the import\r\nis:\r\n\r\n{file_path}\r\n\r\nError:\r\n\r\n{exc}\r\n\r\n\"\"\".format(**message_data)\r\n return msg", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def get_second_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Don't lose out.\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your Foojal.com trial subscription.\n\nSign up today for a full year of Foojal.com for only $24.00 a year before we increase the price.\nThat's only $2.00 a month.\n\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nThank you, Kathy and Adam\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def create_messages(application, action, remedy):\n\n messages = [] \n messages.append(\"\"\"Your Resources: </br><pre style=\"margin-left: 40px\">\"\"\" + application + \"</br></pre>\" + action + \"\"\" in AWS. <strong style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\">\"\"\" + remedy +\"\"\"</strong>\n </td>\n </tr><tr style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\"><td class=\"content-block\" style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; vertical-align: top; margin: 0; padding: 0 0 20px;\" valign=\"top\">\n This message was sent to inform you of changes happening to your resources.\n <ul>\n <li>New instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.</li>\n <li>Instances without the necessary tags are notified through email and Slack.</li>\n </ul>\n If you have any further questions, please reply to this email.\"\"\")\n \n messages.append(\"Your Resources:\\n\\n\" + application + \"\\n\\n\" + action + \" in AWS. 
\" + remedy + \"\\n\" + \n (\"\\nThis message was sent to inform you of changes happening to your resources.\\n\"\n \"\\nNew instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.\"\n \"Instances without Owner Mail and Owner Team tags are notified through email and slack.\\n\"\n \"\\nIf you have any further questions, please reply to this email.\")) \n\n return messages", "def make_email_message(itrf_begin, epoch_begin, itrf_final, epoch_final, velocity, date):\n\n message = \"Estimado Usuario,\\n\\nEn adjunto encontrará los resultados de la transformacion ITRF de acuerdo a la siguiente configuración:\\n\\nITRF inicial: \"+str(itrf_begin)+\"\\nEpoca inicial: \"+str(epoch_begin)+\"\\nITRF final: \"+str(itrf_final)+\"\\nEpoca final: \"+str(epoch_final)+\"\\nModelo de velocidad: \"+velocity+\"\\nFecha de la solicitud de la transformación: \"+date+\"\\n\\n\\nSaludos Cordiales,\\n\\nEquipo de Geodesia del IGVSB.\"\n return message", "def get_personalized_notification_email_text(personal_id):\n return notification_email_text % (personal_id, personal_id)", "def notify_email(subj, message, json, logger=None):\n\n fname = os.path.join(os.path.dirname(__file__), \"config_emails.conf\")\n elist = read_file_aslist(fname, logger)\n\n if logger is not None:\n logger.debug(\"\"\"\nSubject: {}\nMessage: {}\nJson: {}\nEmails: {}\n \"\"\".format(subj, message, json, elist))", "def get_last_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Your trial is over!\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nWe hope you liked your Foojal.com trial and that you will join us for a full year for only $24.00.\n\nTo get a full year subscription to the best online photo food journal, go to your account page at http://app.foojal.com/account.\n\nIf you have any questions, please email us; we would love to talk with you.\n\nThank you, Kathy and Adam\n\n\"\"\"\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % account.nickname\n return message", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def stock_email_blast(stock_dict, notification_time):\n\n with bigbeta_app.app_context():\n print('sending email')\n user_list = build_users_list()\n msg = Message('Big Mover in the Market!',\n sender=email_sender,\n recipients=['jonmbrenner@gmail.com'])\n # recipients=[user_list])\n msg.body = f\"\"\"\\\n!!!HIGH SHORT INTEREST MOVER ALERT!!!\n${stock_dict['ticker']}\nShort Interest: {stock_dict['short_interest']}\nFloat: {stock_dict['free_float']}\nDays to Cover: 
{stock_dict['dtc']}\nRelative Volume: {stock_dict['rvol']}\nNews Catalysts: {stock_dict['stories']}\n\nLast Price: {stock_dict['last_price']} collected at {cur_tm_log}\nNotification kicked off at {notification_time} EST\n\nGo get it!\n- BigBeta Team\n\"\"\"\n\n mail.send(msg)\n\n return None", "def notifications_n_email_after_event_creation(sender, instance, **kwargs):\n alarm = instance.alarm # Alarm which generated the event\n\n subscriptions = Subscription.objects.filter(alarm=alarm) # Getting the subscriptions associated with alarms\n sub_serializer = SubscriptionSerializer(subscriptions, many=True)\n send = [] # list of emails which the mail was send\n notificated = [] # list with users notificated\n\n # If no device, no variable and no content_type, there is nothing to send yet. Cancel notification and email\n if instance.device is None and instance.variables is None and len(instance.content_type.all()) == 0 :\n return\n for sub in sub_serializer.data: # Itering for subscription\n if sub['user'] is not None: # if user field isn't NULL AND not Group\n user = User.objects.get(id=sub['user'])\n if sub['active'] and user not in notificated: # if subscription is active\n Notification.objects.create(user=user, event=instance) # creating notification\n notificated.append(user) # adding user to the notified list\n if sub['email']: # if email option is checked\n email = user.email\n if email not in send: # for dont repeat email\n # Get a dict with relevant information about the event\n context = {'event': instance,\n 'alarm': instance.alarm,\n 'user': user,\n 'device': instance.device,\n 'var': instance.variables,\n 'content_type': instance.content_type.all()}\n plain_text = get_template('mail.txt') # Plain text template\n text_content = plain_text.render(context)\n subject = 'Event Alert: ' + instance.__str__()\n from_email = 'noreply@localhost.com'\n to = email\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n try:\n if sub['staff_template'] is not None:\n htmly = get_template(sub['staff_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['staff_template_text'] != \"\":\n htmly = Template(sub['staff_template_text'])\n html_content = htmly.render(Context(context))\n elif sub['user_template'] is not None:\n htmly = get_template(sub['user_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['user_template_text'] != \"\":\n htmly = Template(sub['user_template_text'])\n html_content = htmly.render(Context(context))\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n except:\n msg.send()\n print('Mail send to %s' % email)\n\n if sub['group'] is not None: # if is group and not user\n users_mail_list = [] # list with staff users instances\n if sub['active']: # if subscription is active\n group = Group.objects.get(pk=sub['group']) # Getting the group by id\n users = User.objects.filter(groups__name=group) # getting the users for group\n context = {'event': instance,\n 'alarm': instance.alarm,\n 'user': group,\n 'device': instance.device,\n 'var': instance.variables}\n for user in users: # Iterating users\n if user not in notificated:\n Notification.objects.create(user=user, event=instance) # creating notification\n notificated.append(user) # adding user to notificated list\n if sub['email']:\n mail = user.email # Adding the email for users in the user list\n if mail not in send: # for 
don't repeat email\n users_mail_list.append(mail)\n send.append(mail)\n # After getting all the emails and classifying it for staff and not staff members\n plain_text = get_template('mail.txt') # Plain text template\n text_content = plain_text.render(context)\n subject = 'Event Alert: ' + instance.__str__()\n from_email = 'noreply@localhost.com'\n msg = EmailMultiAlternatives(subject, text_content, from_email, users_mail_list)\n try:\n if sub['staff_template'] is not None:\n htmly = get_template(sub['staff_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['staff_template_text'] != \"\":\n htmly = Template(sub['staff_template_text'])\n html_content = htmly.render(Context(context))\n elif sub['user_template'] is not None:\n htmly = get_template(sub['user_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['user_template_text'] != \"\":\n htmly = Template(sub['user_template_text'])\n html_content = htmly.render(Context(context))\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n except:\n msg.send()\n print('Mail send to %s' % str(users_mail_list))", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nPlease click the link below to activate your account.\r\n\r\n{0}\r\n\r\nWe currently support importing from Google Bookmarks and Delicious exports.\r\nImporting from a Chrome or Firefox export does work, however it reads the\r\nfolder names in as tags. So be aware of that.\r\n\r\nGet the Chrome extension from the Chrome web store:\r\nhttps://chrome.google.com/webstore/detail/knnbmilfpmbmlglpeemajjkelcbaaega\r\n\r\nIf you have any issues feel free to join #bookie on freenode.net or report\r\nthe issue or idea on https://github.com/bookieio/Bookie/issues.\r\n\r\nWe also encourage you to sign up for our mailing list at:\r\nhttps://groups.google.com/forum/#!forum/bookie_bookmarks\r\n\r\nand our Twitter account:\r\nhttp://twitter.com/BookieBmarks\r\n\r\nBookie is open source. 
Check out the source at:\r\nhttps://github.com/bookieio/Bookie\r\n\r\n---\r\nThe Bookie Team\"\"\".format(message_data)", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else:\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour bookmark import is complete! We've begun processing your bookmarks to\r\nload their page contents and fulltext index them. 
This process might take a\r\nwhile if you have a large number of bookmarks. Check out your imported\r\nbookmarks at https://bmark.us/{username}/recent.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def notify_email(kwargs):\n SMTP_mail_secret_name = \"\" # setting up your AWS secret name\n email_creds = aws.get_secret(SMTP_mail_secret_name, '[regoin]') # setting the regoin to credentials\n emailfrom = email_creds['accountname']\n emailsto = ['[mail receiver]'] # setting up mail receiver\n emailscc = ['[mail cc ]'] # setting up mail cc\n print(f\"Sender: {emailfrom}\")\n\n username = email_creds['username']\n password = email_creds['password']\n server = email_creds['server']\n print(f\"Server: {server}\")\n\n \"\"\"Send custom email alerts.\"\"\"\n print(\"kwargs >>>> \", kwargs)\n ti = kwargs['ti']\n dag_run = kwargs['dag_run']\n var = kwargs['var']['json']\n params = kwargs['params']\n print(f\"ti: {ti}\")\n print(f\"dag_run: {dag_run}\")\n\n ### Get exception then parsing it\n if kwargs.get('exception') is not None and type(kwargs.get('exception')) == list:\n dh_excpt = \"During handling of the above exception, another exception occurred:\"\n matching_main = [s for s in kwargs['exception'] if \"/main.py\" in s]\n print(\"matching_main >>>> \", matching_main)\n \n if matching_main != []:\n matching_fist_text = matching_main[0]\n print(\"matching_fist_text >>>> \", matching_fist_text)\n matching_fist_index = kwargs['exception'].index(matching_fist_text)\n print(\"matching_fist_index >>>> \", matching_fist_index)\n\n matching_last_text = matching_main[-1]\n print(\"matching_last_text >>>> \", matching_last_text)\n matching_last_index = kwargs['exception'].index(matching_last_text)\n print(\"matching_last_index >>>> \", matching_last_index)\n\n if dh_excpt in kwargs['exception']:\n dhe_index = kwargs['exception'].index(dh_excpt)\n print(\"The index of dhe >>>> \", dhe_index)\n\n if matching_fist_index < dhe_index:\n # when \"/main.py\" first show before \"During handling...\" then remove after \"During handling...\" text until the end\n kwargs['exception'][dhe_index:] = []\n elif matching_fist_index > dhe_index:\n # when \"/main.py\" first show after \"During handling...\" then remove after another text until the end\n kwargs['exception'][matching_last_index+2:] = []\n\n formatted_exception = \"\\n\".join(kwargs['exception'])\n print(f\"formatted_exception: {formatted_exception}\")\n elif kwargs.get('exception') is not None: \n formatted_exception = kwargs['exception']\n 
print(f\"formatted_exception: {formatted_exception}\")\n\n title = ''\n body = ''\n print(\"dag_run.run_id >>>> \", dag_run.run_id)\n print(\"ti.task_id >>>> \", ti.task_id)\n print(\"ti.state >>>> \", ti.state)\n\n print(\"When ti.state == State.FAILED >>>> \") # ti.state == State.FAILED as same as ti.state == 'failed'\n title = f\"[TEST] Airflow alert: ({dag_run.run_id}) failed on ({ti.task_id})\"\n body = f\"Dears, \\n\\n\\n\" + \\\n f\"The job_id ({dag_run.run_id}) failed on ({ti.task_id}). \\n\" + \\\n f\"Check what goes wrong, the ERROR message is shown as below: \\n\\n\" + \\\n f\"{formatted_exception} \\n\\n\" + \\\n f\"Forever yours, \\n\" + \\\n f\"RDP Data Team\"\n print(\"check title >>>> \\n\", title)\n print(\"check body >>>> \\n\", body)\n print(f\"Prepare to send out the mail...\\n\\t\\tsubject: {title}\") \n se.email(emailfrom, emailsto, emailscc, username, password, server, body, subject = title)\n print(\"The email send out done.\")\n raise AirflowException(f\"AirflowException: Pleaes check what goes wrong this job_id ({dag_run.run_id}) failed on ({ti.task_id}).\")", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)", "def send_ctr_alert(date, ctr):\n sender = \"team1_rs@outlook.com\"\n receivers = [\"alexa.hernandez@mail.mcgill.ca\"]\n msg = MIMEText(\n f\"Hello Team1,\\n\\nToday's CTR has dropped below {str(MIN_CTR*100)}%. The CTR is {str(ctr*100)}%.\\nPlease \"\n f\"investigate immediately.\"\n )\n\n msg[\"Subject\"] = \"Team1 Recommendation Service - CTR Alert\"\n msg[\"From\"] = sender\n msg[\"To\"] = \";\".join(receivers)\n\n try:\n smtpObj = smtplib.SMTP(\"smtp.office365.com\", 587)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(\"team1_rs@outlook.com\", \"team1*rs\")\n smtpObj.sendmail(sender, receivers, msg.as_string())\n print(\"Successfully sent email\")\n except smtplib.SMTPException as e:\n print(\"Error: unable to send email\")", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nHello {username}:\r\n\r\nPlease activate your Bookie account by clicking on the following url:\r\n\r\n{url}\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n # lookup = config['pylons.app_globals'].mako_lookup\r\n # template = lookup.get_template(template_file)\r\n\r\n # # template vars are a combo of the obj dict and the extra dict\r\n # template_vars = {'data': message_data}\r\n # return template.render(**template_vars)\r", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. 
If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def test_notification_creation_email(self):\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 1)\n msg = message_from_string(mailhost.messages[0])\n\n self.assertEqual(msg['To'], BOARD_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <noreply@eestec.net>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BEVENTS=5D=5BCreated=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been created', msg.get_payload())\n self.assertIn('T=C3=A9st event', msg.get_payload())", "def get_mail( traceback ):\n msg = MIMEText( traceback )\n msg[ 'Subject' ] = Header( 'FX daily cron error' )\n msg[ 'From' ] = 'FX daily cron'\n msg[ 'To' ] = 'tamakoshihiroki@gmail.com'\n msg[ 'Date' ] = formatdate( localtime = 9 )\n msg[ 'Content-Type' ] = ''.join(\n [ 'text/plain; charset=\"', BODY_ENCODING, '\"', ] )\n return msg", "def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)", "def sendNotification(self):\n if not(self.errors or self.accounting):\n return S_OK()\n\n emailBody = \"\"\n rows = []\n for instanceName, val in self.accounting.iteritems():\n rows.append([[instanceName],\n [val.get('Treatment', 'No Treatment')],\n [str(val.get('LogAge', 'Not Relevant'))]])\n\n if rows:\n columns = [\"Instance\", \"Treatment\", \"Log File Age (Minutes)\"]\n emailBody += printTable(columns, rows, printOut=False, numbering=False, columnSeparator=' | ')\n\n if self.errors:\n emailBody += \"\\n\\nErrors:\"\n emailBody += \"\\n\".join(self.errors)\n\n self.log.notice(\"Sending Email:\\n\" + emailBody)\n for address in self.addressTo:\n res = self.nClient.sendMail(address, self.emailSubject, emailBody, self.addressFrom, localAttempt=False)\n if not res['OK']:\n self.log.error(\"Failure to send Email notification to \", address)\n continue\n\n self.errors = []\n self.accounting.clear()\n\n return S_OK()", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def 
send_contact_us_receipt_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\",\n \"contact-us-receipt\", \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_html = render_template(\"main/contact-us-receipt/content.html\")\n msg = Message(\n f'[SetNow Support] Re: {data[\"subject\"]}',\n sender=\"setnow@tuta.io\",\n recipients=[data[\"email\"]],\n )\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def notification(self, approver_list):\n dns_name = axops_client.get_dns()\n job_id = self.root_id\n url_to_ui = 'https://{}/app/jobs/job-details/{}'.format(dns_name, job_id)\n service = axops_client.get_service(job_id)\n\n html_payload = \"\"\"\n<html>\n<body>\n <table class=\"email-container\" style=\"font-size: 14px;color: #333;font-family: arial;\">\n <tr>\n <td class=\"msg-content\" style=\"padding: 20px 0px;\">\n The {} job is waiting for your approval. The job was triggered by {}.\n </td>\n </tr>\n <tr>\n <td class=\"commit-details\" style=\"padding: 20px 0px;\">\n <table cellspacing=\"0\" style=\"border-left: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;border-top: 1px solid #e3e3e3;\">\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Author</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Repo</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Branch</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Description</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Revision</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n </table>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">VIEW JOB</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">VIEW JOB</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" 
style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">APPROVE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">APPROVE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">DECLINE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">DECLINE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"thank-you\" style=\"padding-top: 20px;line-height: 22px;\">\n Thanks,<br>\n Argo Project\n </td>\n </tr>\n </table>\n</body>\n</html>\n\"\"\"\n\n for user in approver_list:\n\n approve_token, decline_token = self.generate_token(user=user, dns_name=dns_name)\n\n approve_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, approve_token)\n decline_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, decline_token)\n\n msg = {\n 'to': [user],\n 'subject': 'The {} job requires your approval to proceed'.format(service['name']),\n 'body': html_payload.format(service['name'], service['user'],\n service['commit']['author'], service['commit']['repo'],\n service['commit']['branch'], service['commit']['description'], service['commit']['revision'],\n url_to_ui, url_to_ui, approve_link, approve_link, decline_link, decline_link),\n 'html': True\n }\n\n if service['user'] != 'system':\n try:\n user_result = axops_client.get_user(service['user'])\n msg['display_name'] = \"{} {}\".format(user_result['first_name'], user_result['last_name'])\n except Exception as exc:\n logger.error(\"Fail to get user %s\", str(exc))\n\n logger.info('Sending approval requests to %s', str(user))\n result = axsys_client.send_notification(msg)\n\n # TODO: Tianhe adding retry mechanism\n if result.status_code != 200:\n logger.error('Cannot send approval request, %s', result.content)\n sys.exit(1)\n logger.info('Successfully sent approval requests to reviewers.')", "def send_reminder(self):\n message_contents = \"This is a reminder that your event: \" + self.event_title + \" takes place on \" + self.event_date + \" in \" + self.event_location\n subject = \"Event Reminder\"\n attendees = self.gameplanuser_set.all()\n for attendee in attendees:\n remindermessage = Message.objects.create(sender=self.event_manager, recipient=attendee, contents=message_contents)\n remindermessage.save()", "def generate_notice_email(notice):\n subject = notice.subject\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = settings.EMAIL_TARGET_S\n to_email = notice.email_to.email\n\n context = {'object': notice}\n\n cont_html = 
render_to_string('emails/email_notice.html', context)\n cont_text = render_to_string('emails/email_notice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, [to_email], reply_to=[reply_to_email])\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def render_message(template_name, extra_context={}):\n mail_text = _render_mail_template(template_name, extra_context)\n rendered_mail = mail_text.replace(u\"\\r\\n\", u\"\\n\").replace(u\"\\r\", u\"\\n\").split(u\"\\n\")\n return rendered_mail[0], \"\\n\".join(rendered_mail[1:])", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked below.\\n\\n\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "def get_email():\n return Email(\n subject='[Messages] Integration Test',\n body='Conducting Integration Testing',\n attachments=str(TESTDIR.joinpath('file2.png')))", "def send_email(\n template_name: str,\n to: typing.Union[str, typing.List[str]],\n personalisation: dict = None,\n reference: str = None,\n staff_email: bool = None,\n retry_attempts: int = 2,\n spoolable_ctx: Context = None,\n):\n client = NotifyClient.shared_client()\n try:\n client.send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n )\n except APIError as e:\n should_retry = (\n # no retry without uWSGI spooler\n spoolable_ctx.spooled\n # no retry if run out of retry attempts\n and retry_attempts\n # retry only for \"service unavailable\" / \"internal error\" statuses\n and 500 <= e.status_code < 600\n # …unless it was caused by an invalid json response\n and not isinstance(e, InvalidResponse)\n )\n if should_retry:\n send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n retry_attempts=retry_attempts - 1,\n )\n else:\n raise e", "def build_hello_email():\n from_email = Email(\"test@example.com\")\n subject = \"Hello World from the SendGrid Python Library\"\n to_email = Email(\"test@example.com\")\n content = Content(\"text/plain\", \"some text here\")\n mail = Mail(from_email, subject, to_email, content)\n mail.personalizations[0].add_to(Email(\"test2@example.com\"))\n\n return mail.get()", "def 
test_template():\n \n # Keywords and values to be filled into the template\n items = {'item_1': 'First', 'long_keyword_item_2': 'Second',\n 'space_3': 'Third Third Third ', 'item_4': 'Fourth',\n 'item_5': None}\n \n sender = 'dummy@moc.org'\n receiver = 'dummy@moc.org'\n result = 'First Second\\nThird Third Third Fourth\\n'\n \n # TEST_DIR = os.path.dirname(os.path.abspath(__file__))\n template = os.path.abspath(os.path.join(TEST_DIR, 'test_template.txt'))\n\n msg = TemplateMessage(sender=sender, email=receiver, template=template,\n **items)\n assert msg.body == result", "def send_feedback_message_email(recipient_id, feedback_messages):\n email_subject_template = (\n 'You\\'ve received %s new message%s on your explorations')\n\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n 'You\\'ve received %s new message%s on your Oppia explorations:<br>'\n '<ul>%s</ul>'\n 'You can view and reply to your messages from your '\n '<a href=\"https://www.oppia.org/creator_dashboard\">dashboard</a>.'\n '<br>'\n '<br>Thanks, and happy teaching!<br>'\n '<br>'\n 'Best wishes,<br>'\n 'The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:\n log_new_error('This app cannot send feedback message emails to users.')\n return\n\n if not feedback_messages:\n return\n\n recipient_user_settings = user_services.get_user_settings(recipient_id)\n\n messages_html = ''\n count_messages = 0\n for exp_id, reference in feedback_messages.iteritems():\n messages_html += (\n '<li><a href=\"https://www.oppia.org/create/%s#/feedback\">'\n '%s</a>:<br><ul>' % (exp_id, reference['title']))\n for message in reference['messages']:\n messages_html += ('<li>%s<br></li>' % message)\n count_messages += 1\n messages_html += '</ul></li>'\n\n email_subject = email_subject_template % (\n (count_messages, 's') if count_messages > 1 else ('a', ''))\n\n email_body = email_body_template % (\n recipient_user_settings.username, count_messages if count_messages > 1\n else 'a', 's' if count_messages > 1 else '', messages_html,\n EMAIL_FOOTER.value)\n\n _send_email(\n recipient_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: 
#ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way&mdash;write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_message():\r\n global count\r\n sutasfolder = os.path.join(os.path.expanduser('~'), \"Sutas_Logs\")\r\n slckobj = SlackNotification().slackobj()\r\n slc = slckobj.send_message()\r\n tmsobj = TeamsNotification().teamsobj()\r\n tms = tmsobj.send_message()\r\n globfilepath = os.path.join(expanduser('~'), \"global_conf.yaml\")\r\n globdata = get_data_from_yaml(globfilepath)\r\n if \"logpath\" in os.environ:\r\n # getting the testsuite name from logpath\r\n mailfile = os.path.basename(os.environ['logpath']).split(\r\n str(datetime.datetime.now().year))[0]\r\n # Inside testsuite folder in sutaslogs we are creating a file with\r\n # testsuite name. 
This file will be used to store the notification\r\n # messages\r\n mailfile = os.path.join(os.path.dirname(os.environ['logpath']),\r\n mailfile)\r\n if os.path.isfile(mailfile):\r\n if count == 0:\r\n os.remove(mailfile)\r\n count = 1\r\n # suitelogpaths file is created in sutaslog folder which is\r\n # in user's home directory.\r\n suitelogpaths = os.path.join(sutasfolder, \"suitelogpaths\")\r\n flag = False\r\n \r\n if globdata.get('Consolidatedmail','no').lower() == 'yes':\r\n mode = \"a\"\r\n else:\r\n mode = \"w\"\r\n # Checks if suitelogpath file already exists.\r\n if os.path.isfile(suitelogpaths):\r\n # checking if the logpath is already in the suitelogpaths file.\r\n # if path exists then continue else writes the path in to file.\r\n with open(suitelogpaths, 'r') as suite:\r\n for line in suite.read().strip().splitlines():\r\n if os.environ['logpath'] in line:\r\n flag = True\r\n if not flag:\r\n with open(suitelogpaths, mode) as suite: \r\n suite.write(os.environ['logpath'])\r\n suite.write('\\n')\r\n else:\r\n # creates suitelogpaths file if doesn't exist and writes\r\n # log path in to it.\r\n with open(suitelogpaths, mode) as suite: \r\n suite.write(os.environ['logpath'])\r\n suite.write('\\n')\r\n #writing notification messages in to a testsuite file which is\r\n #created in testsuite folder.\r\n with open(mailfile, 'a') as agg:\r\n agg.write(os.environ[\"sutasmessages\"])\r\n os.environ[\"sutasmail\"] = os.environ[\"sutasmessages\"]\r\n os.environ[\"sutasmessages\"] = \"\"\r\n msgs = {\"slack\": slc, \"teams\": tms}\r\n if slc != \"success\" or tms != \"success\":\r\n return msgs\r\n else:\r\n return \"success\"", "def send_notification(self, context):\n subject = \"Order placed for %s [%s]\" % (context['product']['name'], context['name'])\n message = render_to_string('notification/product_notification.txt',\n context)\n try:\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,\n self.get_recipients())\n except SMTPException as e:\n logger.error(\"Error sending notification: %s\" % e)\n else:\n logger.info(\"Sent notification for %s [%s]\" % (context['product']['name'], context['name']))", "def mail_template(self, template_name, send_to=None, user = None, event_title=\"\", **kwargs):\n barcamp = kwargs.get('barcamp')\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n if barcamp is not None:\n subject = barcamp.mail_templates['%s_subject' %template_name]\n tmpl = jinja2.Template(barcamp.mail_templates['%s_text' %template_name])\n kwargs['fullname'] = user.fullname\n payload = tmpl.render(**kwargs)\n payload = payload.replace('((fullname))', user.fullname)\n payload = payload.replace('((event_title))', event_title)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_welcome_mail(backend, details, response, user, is_new=False, *args, **kwargs):\n\n if is_new:\n context = Context({'user': user, 'ga_campaign_params' : 'utm_source=unishared&utm_content=v1&utm_medium=e-mail&utm_campaign=welcome_mail'})\n\n email_task.apply_async([u'Welcome on UniShared!', context, 'welcome_mail', [user.email]], eta= datetime.utcnow() + timedelta(hours=1))", "def send_notification(self, msg_body='', subject=None, mail_to=None, data_dict=None, body_style=''):\n if self._mail_body_footer:\n msg_body += '\\n' + self._mail_body_footer\n if not subject:\n subject = 'Notification'\n if self._used_system:\n subject += ' [' + self._used_system + ']'\n if not mail_to:\n mail_to = self._mail_to\n title_ext = \" with subject='\" + 
subject + \"'\" + \\\n (\" and data_dict='\" + str(data_dict) + \"'\" if data_dict else \"\") + \".\"\n if isinstance(mail_to, str):\n mail_to_expr = mail_to\n try:\n mail_to = try_eval(mail_to_expr) # data_dict to check data, subject/msg_body to mail content\n except Exception as ex:\n po(\" **** Notification.send_notification() exception '\" + str(ex) +\n \"' on evaluating of expression '\" + str(mail_to_expr) +\n \"'\" + title_ext)\n if not isinstance(mail_to, list):\n po(\" **** Notification.send_notification(): invalid email-to address list or expression '\" +\n str(mail_to) + \"' - using fallback!\")\n mail_to = ['aecker2@gmail.com']\n body_style = body_style or 'html' if '</' in msg_body else 'plain'\n if body_style == 'html':\n # using the <pre>...</pre> tags we no longer need replace(' ', '&nbsp;')\n msg_body = str(msg_body).replace('\\r\\n', '<br>').replace('\\n', '<br>').replace('\\r', '<br>')\n # adding html special char conversion disables all other html tags too:\n # .replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>') \\\n # .replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\n\n # log error message and try to send it per email\n if self.debug_level >= DEBUG_LEVEL_VERBOSE:\n po(\" #### Notification.send_notification(): BODY{\" + msg_body[:MAX_LEN_BODY_IN_LOG] + \"..}\" + title_ext)\n err_msg = ''\n try:\n message = MIMEText(msg_body, _subtype=body_style)\n message['Subject'] = subject\n message['From'] = self._mail_from\n message['To'] = ', '.join(mail_to)\n # Oracle P_SENDMAIL() is using smtp server as local host\n # SMTP_SSL could throw \"SSL:UNKNOWN_PROTOCOL\" error\n conn_type = SMTP_SSL if self._mail_port == SSL_ENC_PORT or self._mail_service == SSL_ENC_SERVICE_NAME \\\n else SMTP\n with conn_type(self._mail_host, self._mail_port, local_hostname=self._local_mail_host) as s:\n # if self.debug_level >= DEBUG_LEVEL_VERBOSE:\n # s.set_debuglevel(1)\n s.ehlo()\n # using s.starttls() could throwing error \"STARTTLS extension not supported by server.\"\n if self._mail_service == TSL_ENC_SERVICE_NAME:\n s.starttls()\n if self._user_name:\n s.login(self._user_name, self._user_password)\n unreached_recipients = s.send_message(message, self._mail_from, mail_to)\n if unreached_recipients:\n err_msg = 'Unreached Recipients: ' + str(unreached_recipients)\n except Exception as mex:\n err_msg = 'mail send exception: {}'.format(mex)\n\n if err_msg and self.debug_level >= DEBUG_LEVEL_ENABLED:\n po(' **** Notification.send_notification() error: {}.'.format(err_msg))\n\n return err_msg", "def create_email_body_for_update_resources(results):\n\n failures = [url for url in results if results[url][\"state\"] == \"Failed\"]\n warnings = [url for url in results if results[url][\"state\"] == \"Warning\"]\n successes = [url for url in results if results[url][\"state\"] == \"Succeeded\"]\n\n body, html_content = \"\", \"\"\n\n # Failed\n if failures:\n body += f\"Failed [{len(failures)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(failures)}]</h2>\\n\\n\"\n\n for url in failures:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: red;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Warnings\n if warnings:\n body += f\"Warnings [{len(warnings)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(warnings)}]</h2>\\n\\n\"\n\n for url in warnings:\n 
result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: orange;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Succeeded\n if successes:\n body += f\"Succeeded [{len(successes)}]\\n\\n\"\n html_content += f\"<h2>Succeeded [{len(successes)}]</h2>\\n\"\n\n for url in successes:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: green;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n body_html = f\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Updated BEL Resources for {settings.HOST_NAME}</title>\n </head>\n <body>\n <div id=\"content\">{html_content}</div>\n </body>\n</html>\n \"\"\"\n\n return (body, body_html)", "def create_text_messages(file_path: str) -> None:\n\n data = load_data(file_path)\n month_name = file_path[file_path.rindex(\"\\\\\", -14) + 1:-4]\n\n messages = create_messages(data, month_name)\n success = send_messages(messages)\n print(success)", "def notification_email(self, sender, subject, body):\n\n\t\tts = str(int(time.time())*1000)\n\t\tparts = [sender, body, ts, subject]\n\t\tself._send_message(\"NOTIFICATION\", self._pack_message_data(0, parts))", "def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"aladinshixi@gmail.com\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"aladinshixi@gmail.com\", \"aladinshixi@gmail.com\", msg)\n\n # smtp.close()\n return False", "def template_message(include_title=False, template='markdown.md.j2', exclude_labels=True, current_length=0, **kwargs):\n processed = {'message': ''}\n alerts_count = len(kwargs['alerts'])\n title = f\"{alerts_count} alert(s) received\"\n if not include_title:\n processed.update({'title': f\"{title}\"})\n title = None\n processed['message'] = render_template(\n template,\n title=title,\n alerts=kwargs['alerts'],\n external_url=kwargs['external_url'],\n receiver=kwargs['receiver'],\n exclude_labels=exclude_labels,\n current_length=current_length,\n )\n for alert in kwargs['alerts']:\n if int(alert['annotations'].get('priority', -1)) > processed.get('priority', -1):\n processed['priority'] = int(alert['annotations']['priority'])\n return processed", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def send_notification(self):\n # Sending the notification\n tbs = TestBuild.objects.filter(pk__in = self.create_builds_list)\n tbs = tbs.order_by('product')\n\n tbp_pks = list(set(tbs.values_list('product', flat=True)))\n ps = Product.objects.filter(pk__in = tbp_pks)\n\n message = MAIL_HEADER\n\n line = '=' * 30 + '\\n'\n\n for p in ps:\n p_str = unicode(p)\n\n message += line + p_str + '\\n' + line\n for tb in tbs:\n if tb.product == p:\n message += '* ' + unicode(tb) + '\\n'\n message += '\\n'\n\n mail_to = []\n for admin in ADMINS:\n mail_to.append(admin[1])\n\n send_mail(MAIL_SUBJECT, message, MAIL_FROM, mail_to)", "def createThankYouEmail(self):\n result = (\"\\nDear 
{:s},\\n\\n\"\n \"\\tThank you so much for your generous donation of ${:,.2f}!\\n\\n\"\n \"\\tIt will be put to very good use.\\n\\n\"\n \"\\t\\tSincerely,\\n\\t\\t\\t- The Team\".format(self.name, self.getTotDonation())\n )\n return result", "def __str__(self):\n email_template = '\\n'.join((f'\\n\\nDear {self._full_name},\\n',\n f'Thank you for your very kind donation of ${self.last_donation:.2f}.\\n',\n 'It will be put to very good use.\\n',\n ' Sincerely,',\n ' -The Team\\n'))\n return email_template", "def mail_text(self, template_name, subject, send_to=None, user = None, **kwargs):\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n payload = self.render_lang(template_name, **kwargs)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def mail_text(self, template_name, subject, send_to=None, user = None, **kwargs):\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n payload = self.render_lang(template_name, **kwargs)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_swarms_list_email(message, ref):\n swarm_list_email = settings.DEFAULT_SWARMS_EMAIL\n contact_email = settings.DEFAULT_FROM_EMAIL\n subject = render_to_string(\n 'contact/swarm_emails/new_swarm_subject.txt',\n {'message': message, 'ref': ref})\n body = render_to_string(\n 'contact/swarm_emails/new_swarm_body.txt',\n {'message': message, 'contact_email': contact_email})\n\n send_mail(\n subject,\n body,\n settings.DEFAULT_FROM_EMAIL,\n [swarm_list_email]\n )", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour import has failed. The error is listed below. Please file a bug at\r\nhttps://github.com/bookieio/bookie/issues if this error continues. 
You may\r\nalso join #bookie on freenode irc if you wish to aid in debugging the issue.\r\nIf the error pertains to a specific bookmark in your import file you might try\r\nremoving it and importing the file again.\r\n\r\nError\r\n----------\r\n\r\n{exc}\r\n\r\nA copy of this error has been logged and will be looked at.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def send_now(users, label, extra_context=None, on_site=True):\r\n if extra_context is None:\r\n extra_context = {}\r\n \r\n notice_type = NoticeType.objects.get(label=label)\r\n\r\n current_site = Site.objects.get_current()\r\n notices_url = u\"http://%s%s\" % (\r\n unicode(current_site),\r\n reverse(\"notification_notices\"),\r\n )\r\n\r\n current_language = get_language()\r\n\r\n formats = (\r\n 'short.txt',\r\n 'full.txt',\r\n 'notice.html',\r\n 'full.html',\r\n ) # TODO make formats configurable\r\n\r\n for user in users:\r\n recipients = []\r\n # get user language for user from language store defined in\r\n # NOTIFICATION_LANGUAGE_MODULE setting\r\n try:\r\n language = get_notification_language(user)\r\n except LanguageStoreNotAvailable:\r\n language = None\r\n\r\n if language is not None:\r\n # activate the user's language\r\n activate(language)\r\n\r\n # update context with user specific translations\r\n context = Context({\r\n \"user\": user,\r\n \"notice\": ugettext(notice_type.display),\r\n \"notices_url\": notices_url,\r\n \"current_site\": current_site,\r\n })\r\n context.update(extra_context)\r\n\r\n # get prerendered format messages\r\n messages = get_formatted_messages(formats, label, context)\r\n\r\n # Strip newlines from subject\r\n subject = ''.join(render_to_string('notification/email_subject.txt', {\r\n 'message': messages['short.txt'],\r\n }, context).splitlines())\r\n\r\n body = render_to_string('notification/email_body.txt', {\r\n 'message': messages['full.txt'],\r\n }, context)\r\n\r\n notice = Notice.objects.create(user=user, message=messages['notice.html'],\r\n notice_type=notice_type, on_site=on_site)\r\n if should_send(user, notice_type, \"1\") and user.email: # Email\r\n recipients.append(user.email)\r\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)\r\n\r\n # reset environment to original language\r\n activate(current_language)", "def send_batch_notification(batch_name):\r\n meta = batch_metadata(batch_name)\r\n status_details = sorted(batch_job_status_details(batch_name),\r\n key=operator.itemgetter('start_time', 'end_time'),\r\n reverse=True)\r\n batch_status = (0 if all(job['job_status'] == 0 for job in status_details)\r\n else -1)\r\n if meta['notification_email_recipients']:\r\n html_table_start_tag = '<table style=\"width:100%\">'\r\n html_table_header_row = ('<tr><th>Name</th><th>Status</th>'\r\n '<th>Start Time</th><th>End Time</th></tr>')\r\n html_table_row_template = (\r\n '<tr><td>{job_name}</td><td>{job_status_description}</td>'\r\n '<td>{start}</td><td>{end}</tr>'\r\n )\r\n html_table_end_tag = '</table>'\r\n html_table_rows = []\r\n for job in status_details:\r\n job['start'] = (job['start_time'].strftime('%Y-%m-%d %H:%M')\r\n if job['start_time'] else 'n/a')\r\n job['end'] = (job['end_time'].strftime('%Y-%m-%d %H:%M')\r\n if job['end_time'] else 'n/a')\r\n html_table_rows.append(html_table_row_template.format(**job))\r\n html_full_string = \"{start}{header}{rows}{end}\".format(\r\n start=html_table_start_tag, header=html_table_header_row,\r\n rows=''.join(html_table_rows), end=html_table_end_tag\r\n )\r\n communicate.send_email(\r\n 
subject=\"ETL Batch: {} ({})\".format(\r\n batch_name, STATUS_DESCRIPTION_MAP[batch_status]\r\n ),\r\n recipients=meta['notification_email_recipients'],\r\n body=html_full_string, body_format='HTML'\r\n )\r\n return html_full_string", "def send_contact_notification():\n logging.info(\"Mail sending..\")\n notifications = Notification.query.filter_by(email_sent=False, user_notification=True).all()\n count = 0\n for notification in notifications:\n user_id = notification.user_id\n # fetch user mail from User service\n try:\n # print('request to:',f\"http://{os.environ.get('GOS_USER')}/user?id={user_id}\")\n resp = requests.get(f\"http://{os.environ.get('GOS_USER')}/user?id={user_id}\")\n if resp.status_code != 200:\n logging.error(f\"[{resp.status_code}] Mail task, User service replied with error {resp.json()}\")\n continue\n email = resp.json()['email']\n except Exception as e:\n # if user requests fails, we'll try to send email at next task trigger\n logging.error(e)\n continue\n if email is not None and email.strip() != '':\n # send email\n date = notification.date.strftime('%Y-%m-%d at %H:%M')\n template = env.get_template('./mail_notification.html')\n output = template.render(dest=resp.json(), date=date)\n pos_outcome = send_email(email, output)\n if pos_outcome:\n notification.email_sent = True\n db.session.commit()\n logging.info(f\"Email to {email} just sent\")\n count += 1\n else:\n logging.error(f\"Error while sending email to {email}\")\n\n logging.info(f'{count} email(s) sent')", "def email_body_new_proposal_notification_to_seller(meeting, buyer_name, buyer_profile_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr><td align=\"center\" valign=\"top\">\\n\\t</td></tr>\\n</tbody>\\n</table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody><tr>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" />'\n\tmsg = msg + '\\n\\t\\t\\t\\t</a>'\n\tmsg = msg + '\\n\\t\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\tGreat! 
You received a new proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + buyer_profile_id + '\\\" style=\"color:#1488CC\">'+ buyer_name + '</a>.'\n\tmsg = msg + '\\n\\t\\t\\t\\t<br><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDescription: ' + meeting.get_description_html() + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:150px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.accept_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Accept</a> '\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.reject_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #e55e62;font-size: 14px;border-radius: 3px;border: 1px solid #e55e62;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center\" target=\"_blank\">Reject</a> '\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n</tr></tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_welcome_email(username: str, email: str) -> None:\n\n # TODO ...\n # Load html templates and get the content from it.\n # html_content = ...\n\n content = f\"<h1>Welcome to app, {username}</h1>\"\n email = sender.create_email(\n to_list=[email],\n subject=f\"Welcome from {{ app }}\",\n html_content=content,\n )\n sender.send_email(email_to_send=email)", "def send_email(self, text):\n msg_text = MIMEText(text)\n 
msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def GetMimeMessage(service, user_id, msg_id, idx):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,\n format='raw').execute()\n\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mail = mailparser.parse_from_bytes(msg_str)\n\n msg_str = str(mail.text_plain)\n msg_str = msg_str.strip(\"\")\n msg_str = clean_text(msg_str)\n msg_str = preprocess(msg_str)\n\n #print(msg_str)\n\n except errors.HttpError:\n print('An error occurred:')\n\n try:\n met = service.users().messages().get(userId=user_id, id=msg_id, format='metadata').execute()\n\n pay = met['payload']\n head = pay['headers']\n sub=\"\"\n for h in head:\n if (h['name'] == 'Subject'):\n sub = \"Subject: \"+str(h['value'])\n except errors.HttpError:\n print('An error occurred:')\n filename = \"./ham/email\"\n file_extension = \".txt\"\n new_fname = \"{}-{}{}\".format(filename, idx, file_extension)\n #print(new_fname)\n f= open(new_fname,\"w+\")\n f.write(sub+\"\\n\")\n f.write(msg_str)\n f.close()", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "def sendNotificationEmail(fundName, status, message):\n\tgetSubject = lambda fundName, status: \\\n\t\tfundName + ' auto update succesful' \\\n\t\tif status == Constants.STATUS_SUCCESS else \\\n\t\tfundName + ' auto update failed'\n\n\tlogger.debug('sendNotificationEmail(): {0}'.format(fundName))\n\tsendMail( message\n\t\t\t, getSubject(fundName, status)\n\t\t\t, getMailSender()\n\t\t\t, getNotificationMailRecipients()\n\t\t\t, getMailServer()\n\t\t\t, getMailTimeout())", "def update_helpdesk(self, data):\n self.sr=data\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else: \n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n msg['Subject'] = 'Your Request SR# %s for VM provisioning \\\n reported failure for product %s' % \\\n\t\t\t ( 
self.sr['requestNumber'], pdict[self.data['groupid']] )\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve \\\n the following problem and notify infra team.\")\n nHtml.append(\"VM creation readiness from vCAC cloud \\\n is reported failure, \\\n Product is <b>%s</b> is stuck.\" \\\n % pdict[self.data['groupid']])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is impacted.<br><br>\" % pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for SR# \\\n related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: \\\n <a href=%s> Effected Build \\\n Console</a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + \\\n msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def payment_instructions_email_notification(sender, **kwargs):\n subject_template_name = \\\n 'shop_simplenotifications/payment_instructions_subject.txt'\n body_text_template_name = \\\n 'shop_simplenotifications/payment_instructions_body.txt'\n body_html_template_name = \\\n 'shop_simplenotifications/payment_instructions_body.html'\n \n request = kwargs.get('request')\n order = kwargs.get('order')\n \n emails = []\n if order.user and order.user.email: \n emails.append(order.user.email)\n if request and get_billing_address_from_request(request):\n address = get_billing_address_from_request(request)\n if hasattr(address, 'email'):\n emails.append(address.email)\n emails = list(set(emails)) # removes duplicated entries\n if emails:\n subject = loader.render_to_string(\n subject_template_name,\n RequestContext(request, {'order': order})\n )\n subject = subject.join(subject.splitlines())\n\n text_content = loader.render_to_string(\n body_text_template_name,\n RequestContext(request, {'order': order})\n )\n\n try:\n html_content = loader.render_to_string(\n body_html_template_name,\n RequestContext(request, {'order': order})\n )\n except TemplateDoesNotExist:\n html_content = None\n\n from_email = getattr(settings, 'SN_FROM_EMAIL',\n settings.DEFAULT_FROM_EMAIL)\n\n message = EmailMultiAlternatives(subject, text_content, from_email,\n emails)\n if html_content:\n message.attach_alternative(html_content, \"text/html\")\n message.send()", "def get_notification_template(self):\n if self.db_config_file.key_exists(\"notification_template_file\"):\n filename = self.db_config_file_value(\"notification_template_file\").strip('\"')\n return 
open(filename, 'rt').read()\n\n return get_data(\"asebackupcli\", \"notification.json\")", "def send_mail_to_student(student, param_dict):\r\n\r\n # add some helpers and microconfig subsitutions\r\n if 'course' in param_dict:\r\n param_dict['course_name'] = param_dict['course'].display_name_with_default\r\n\r\n param_dict['site_name'] = microsite.get_value(\r\n 'SITE_NAME',\r\n param_dict['site_name']\r\n )\r\n\r\n subject = None\r\n message = None\r\n\r\n # see if we are running in a microsite and that there is an\r\n # activation email template definition available as configuration, if so, then render that\r\n message_type = param_dict['message']\r\n\r\n email_template_dict = {\r\n 'allowed_enroll': (\r\n 'emails/enroll_email_allowedsubject.txt',\r\n 'emails/enroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_enroll': (\r\n 'emails/enroll_email_enrolledsubject.txt',\r\n 'emails/enroll_email_enrolledmessage.txt'\r\n ),\r\n 'allowed_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_enrolledmessage.txt'\r\n ),\r\n 'add_beta_tester': (\r\n 'emails/add_beta_tester_email_subject.txt',\r\n 'emails/add_beta_tester_email_message.txt'\r\n ),\r\n 'remove_beta_tester': (\r\n 'emails/remove_beta_tester_email_subject.txt',\r\n 'emails/remove_beta_tester_email_message.txt'\r\n ),\r\n }\r\n\r\n subject_template, message_template = email_template_dict.get(message_type, (None, None))\r\n if subject_template is not None and message_template is not None:\r\n subject = render_to_string(subject_template, param_dict)\r\n message = render_to_string(message_template, param_dict)\r\n\r\n if subject and message:\r\n # Remove leading and trailing whitespace from body\r\n message = message.strip()\r\n\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message, from_address, [student], fail_silently=False)", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def send_email(service, user_id, message):\r\n try:\r\n message = (service.users().messages().send(userId=user_id, body=message).execute())\r\n return message\r\n except Exception as e:\r\n print(\"err: problem sending email\")\r\n print(e)", "def _set_email(settings, excel):\n weekend = excel.week_end()\n week_start = excel.week_start()\n dest = [str(settings.user), str(settings.dest_addr)]\n msg = MIMEMultipart('alternative')\n msg['Subject'] = 'Time sheet'\n 
msg['From'] = settings.user\n msg['To'] = \", \".join(dest)\n\n body = \"\"\"Howdy,\\n\\nHere is my time sheet from %s to %s. Have a good weekend!\\n\\nThanks,\\n\\n%s\"\"\" %\\\n (week_start, weekend, settings.name)\n\n derp = MIMEText(body, 'plain')\n msg.attach(derp)\n\n attach = MIMEBase('application', \"octet-stream\")\n attach.set_payload(open(settings.active_xl, \"rb\").read())\n encoders.encode_base64(attach)\n attach.add_header('Content-Disposition', 'attachment; filename=\"Timesheet ' + settings.name + '.xlsx\"')\n\n msg.attach(attach)\n\n return msg", "def email_body_appointment_confirmation_for_seller(meeting, buyer_profile, sellr_profile, msg_user_link='https://INSPRITE.co/message/USER'):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Fantastic! You accepted <a href=\"https://127.0.0.1:5000/profile?' + buyer_profile.prof_id + '\" style=\"color:#1488CC\">' + buyer_profile.prof_name + '\\'s proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details:<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"' + msg_user_link + '\" style=\"color:#1488CC\"> ' + buyer_profile.prof_name + ' a message.</a><br><br>We know life can be busy, so we\\'ll send you a reminder 24 hours in advance too.</font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\"><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"><a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a> '\n\tmsg = msg + '| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def compose_email(self, donation_id=-1, write_totals=False):\n if not write_totals:\n amount = self.donations[donation_id]\n else:\n amount = self.total_donations\n email_string = f\"\\nDear {self.name},\\n Thank you for your generous\\\n gift of ${amount:.2f}! It will help Local Charity achieve our mission.\\n\\\n Best regards,\\n\\\n Local Charity\\n\\n\"\n return email_string", "def send_alert_email(email: str, username: str, attribute: str,\n value: Union[int,float], recorded_date: datetime,\n sensor_id: str, threshold: Union[int, float], \n alert_description: str) -> bool:\n\n sensor = Sensor.get_by_id(sensor_id)\n sensor_lat, sensor_lon = None, None\n if sensor:\n loc = Location.get_by_id(sensor.l_id)\n if loc:\n sensor_lat = loc.lat\n sensor_lon = loc.lon\n else:\n logger.info(\"Could not include sensor information in alert \"\n \"email as sensor with ID {} does not \"\n \"exist\".format(sensor_id))\n\n if alert_description == \"exceeded\":\n diff = value - threshold\n else:\n diff = threshold - value\n\n text_version = GetConfig.configure('alert', 'text_template').format(\n username=username, attribute=attribute, threshold=threshold,\n sensor_id=sensor_id, lat=sensor_lat, lon=sensor_lon,value=value,\n recorded_date=recorded_date, verb=alert_description, diff=diff)\n\n html_version = flask.render_template(\n GetConfig.configure('alert', 'html_template'), username=username,\n attribute=attribute, threshold=threshold, sensor_id=sensor_id,\n lat=sensor_lat, lon=sensor_lon,value=value,\n recorded_date=recorded_date, verb=alert_description, diff=diff)\n\n sg = sendgrid.SendGridAPIClient(apikey=GetConfig.configure('sendgrid',\n 'api_key'))\n\n from_email = Email(GetConfig.configure('alert', 'sender_email'))\n to_email = Email(email)\n subject = GetConfig.configure('alert', 'email_subject')\n content_text = Content(\"text/plain\", text_version)\n send_alert = Mail(from_email, subject, to_email, content_text)\n content_html = Content(\"text/html\", html_version)\n send_alert.add_content(content_html)\n\n try:\n email_response = sg.client.mail.send.post(\n request_body=send_alert.get())\n logger.info(\"Sent alert email to {} with \"\n \"response code : {}\".format(email,\n email_response.status_code))\n return True\n except http.client.IncompleteRead as e:\n logger.error(\"Sendgrid API Key may not be set correctly or be \"\n \"invalid\", e)\n return False", "def send_email(msg):\n\tprint(\"sendEmail: \" + msg)", "def send_notification(semester):\n\n\tfrom University import Student\n\t#query to retrieve students\n\tstudents = Student.query.filter_by(sem = semester).all()\n\t#keep only the email-id of the students\n\tstudents = list(map(lambda x : x.email, students))\n\t#print('sending Message', students)\n\tmsg = Message('Meeting Schedule Notification.',\n\t\t\tsender = 'pesfacultyadvisor.sepro2017@gmail.com',\n\t\t\trecipients = students)\n\t#print('Object created!')\n\tmsg.body = \"Dear Student\\n A meeting is scheduled on so and so date.\\n We request you to 
attend the meeting.\"\n\tmail.send(msg)\n\n\treturn \"Notification Sent!!\"", "def send_warning(self):\n\n # Check whether all the necessary parameters for SMS are present\n if self.your_phone != '' and self.twilio_phone != '' and self.account_sid != '' and self.auth_token != '':\n client = Client(self.account_sid, self.auth_token)\n\n try:\n sms = client.messages.create(\n body=\"\"\"Last will: It was at least 30 days since your last check in. \n This is a reminder to check in in the next 24 hours.\"\"\",\n from_=self.twilio_phone,\n to=self.your_phone)\n sms\n print(\"\\nSMS sent\")\n except Exception as e:\n print(f\"An error occurred while trying to send the SMS. Error: {e}\")\n\n else:\n print(\"\\nMissing SMS parameters. SMS not sent\")\n\n # Check whether all the necessary parameters for email are present\n if self.sender_name != '' and self.recipient_email != '' and self.email != '' and self.email_pwd != '':\n message = f\"\"\"It has been at least 30 days since you last checked in. \nYou need to check in in the next 24 hours.\\n\nOtherwise at {self.deadline} the email with the important info will be sent to the designated recipient.\\n\nIn order to reset simply go to the working directory and run python3 last_will.py\"\"\"\n\n # send_email will return 0 if everything went ok, otherwise it will return an error message\n status = send_email(self.sender_name, self.your_email,\n self.email, self.email_pwd,\n subject='Last will: Reminder to check in', unencrypted_message=message)\n\n if status != 0:\n print(status)\n exit(1)\n else:\n print(\"Email sent\\n\")\n\n print(f\"You have until {self.deadline} to check in. \"\n f\"In order to do that simply go to the working directory and run ./last_will.sh\\n\")\n else:\n print(\"Missing email parameters. 
Email not sent.\\n\")\n exit(1)", "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def storage_request_notify(request, project):\n subject = 'Storage request received: {0}'.format(\n project.title)\n\n content = {'project': project,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME}\n\n content['name'] = \"Colleague\"\n body = loader.render_to_string(\n 'notification/email/storage_request_notify_team.html', content)\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.CONTACT_EMAIL], fail_silently=False)", "def build_message():\n outgoing_mail = Mail()\n outgoing_mail.from_email = Email(email_from_address, email_from_name)\n outgoing_mail.subject = subject\n personalization = Personalization()\n for recipient in email_to_addresses:\n personalization.add_to(Email(recipient))\n outgoing_mail.add_personalization(personalization)\n outgoing_mail.add_content(Content(\"text/plain\", str.join('\\n', _log)))\n outgoing_mail.add_content(Content(\"text/html\", \"<html><body> {} </body></html>\".format(str.join(' <br /> ', _log))))\n return outgoing_mail.get()", "def task_rescheduled_notify(name, attempts, last_error, date_time, task_name, task_params):\n body = loader.render_to_string(\n 'notification/email/notify_rescheduled_task.html', {\n 'name': name,\n 'attempts': attempts,\n 'last_error': last_error,\n 'date_time': date_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'task_name': task_name,\n 'task_params': task_params,\n 'signature': settings.EMAIL_SIGNATURE\n })\n subject = name + \" has been rescheduled\"\n mail_admins(subject, body, settings.DEFAULT_FROM_EMAIL)", "def send_email(client, smtp, server_list):\n content = create_body_html(client, server_list)\n\n # Email Configuration\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = '[Cyberwatch] Servers recovered from \"Communication failure\" report - ' + \\\n date.today().strftime(\"%m/%d/%y\")\n message[\"From\"] = smtp[\"sender\"]\n message[\"To\"] = \", \".join(EMAIL_RECEIVERS)\n\n # Get Period start date with \"Last Modified\" time of file\n start_date = datetime.fromtimestamp(os.path.getmtime(os.path.dirname(\n __file__) + '/communication_failure_list.txt')).strftime(\"%d/%m/%Y, %H:%M\")\n\n email_body = f\"\"\"\\\n <p>Greetings,</p>\n\n <p>Please find in the following section, a list of servers that recovered from the status\n \"Communication failure\".</p>\n\n <span style=\"color:#4bb9f1;font-size:18px;align:center\"><strong>Servers recovered from \"Communication Failure\"\n between {start_date} and {datetime.now().strftime(\"%d/%m/%Y, %H:%M\")}</strong></span>\n <br />\n\n <br />{content}<br />\n\n <p>The Cyberwatch Team - support@cyberwatch.fr</p>\n \"\"\"\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(MIMEText(email_body, 
\"plain\"))\n message.attach(MIMEText(email_body, \"html\"))\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(smtp[\"server\"], smtp[\"port\"], context=context) as server:\n server.login(smtp[\"login\"], smtp[\"password\"])\n server.sendmail(\n smtp[\"sender\"], EMAIL_RECEIVERS, message.as_string()\n )\n\n print(\"Successfully sent email to {}\".format(message[\"To\"]))", "def delegate_last_day():\n\n regs = Registration.objects.all()\n\n template = 'notifications/last_day_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Schedule and other details'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')" ]
[ "0.7191147", "0.6990005", "0.69212645", "0.65836084", "0.64851093", "0.6402051", "0.6330503", "0.6326159", "0.62814146", "0.6238903", "0.62185687", "0.6207358", "0.6139531", "0.60843295", "0.60813093", "0.60735154", "0.6054305", "0.60353315", "0.6029179", "0.60170865", "0.5988139", "0.5963002", "0.5927257", "0.59234124", "0.5922753", "0.59185773", "0.59153867", "0.59122175", "0.589498", "0.5891763", "0.58896124", "0.58768576", "0.58633626", "0.5849663", "0.58493704", "0.5834656", "0.58035547", "0.5793585", "0.5778664", "0.57780933", "0.5752655", "0.57470447", "0.57203394", "0.5718362", "0.5715908", "0.5709096", "0.57063377", "0.56992775", "0.56954503", "0.5694687", "0.5692121", "0.56911325", "0.5685465", "0.5684614", "0.5679292", "0.56766254", "0.56765527", "0.5673096", "0.56729865", "0.56725353", "0.5671756", "0.5665913", "0.5662215", "0.5648676", "0.5641542", "0.5640984", "0.56404465", "0.5625384", "0.5625384", "0.56232995", "0.5622975", "0.5619839", "0.5614214", "0.56139207", "0.5612813", "0.560958", "0.5608849", "0.5604938", "0.5601636", "0.55953753", "0.55893433", "0.55837345", "0.5582717", "0.55773246", "0.5568481", "0.5566484", "0.55652285", "0.5558264", "0.5556392", "0.5548222", "0.5547093", "0.5536598", "0.55362254", "0.55318946", "0.55317146", "0.5524801", "0.5523046", "0.55144274", "0.5497572", "0.5486166" ]
0.74859136
0
Create a formatted email message to the worker when a hirer makes an evaluation template alerts/worker_evaluated.txt
def alert_worker_evaluated(hirer,worker):\n    message = loader.get_template(\n        'alerts/worker_evaluated.txt').render(\n        {'worker': worker, 'hirer': hirer})\n    return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def alert_new_service_notification(hirer, worker, service):\n\n domain = Site.objects.get_current().domain\n url = \"http://\" + domain + \"/worker/\"\n\n message = loader.get_template(\n 'alerts/new_service_notification.txt').render(\n {'worker': worker, 'hirer': hirer, 'service': service, 'url':url})\n\n return message", "def generateNotifyMessage(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n today = date.today()\n current_date = today.strftime(\"%B %d, %Y\")\n\n subject = \"Progam operating warning - Not Running\"\n body = \"Since \" + current_date + \" at \" + current_time \n msg = f'Subject: {subject} \\n\\n{body}'\n return msg", "def alert_subscription_message(request, user):\n message = loader.get_template(\n 'alerts/subscription_message.txt').render(\n {'user': user, 'evaluation_link': resolve_worker_evaluation_url(request, user)})\n\n return message", "def test_email_content():\n\n time_of_day = alerts.current_time()\n hostname = alerts.host_name()\n\n subject = \"Subject: Raspi-Sump Email Test\"\n message = \"Raspi-Sump Test Email\"\n\n return \"\\r\\n\".join(\n (\n f\"From: {configs['email_from']}\",\n f\"To: {configs['email_to']}\",\n f\"{subject}\",\n \"\",\n f\"{hostname} - {time_of_day} - {message}.\",\n )\n )", "def notify_email(kwargs):\n SMTP_mail_secret_name = \"\" # setting up your AWS secret name\n email_creds = aws.get_secret(SMTP_mail_secret_name, '[regoin]') # setting the regoin to credentials\n emailfrom = email_creds['accountname']\n emailsto = ['[mail receiver]'] # setting up mail receiver\n emailscc = ['[mail cc ]'] # setting up mail cc\n print(f\"Sender: {emailfrom}\")\n\n username = email_creds['username']\n password = email_creds['password']\n server = email_creds['server']\n print(f\"Server: {server}\")\n\n \"\"\"Send custom email alerts.\"\"\"\n print(\"kwargs >>>> \", kwargs)\n ti = kwargs['ti']\n dag_run = kwargs['dag_run']\n var = kwargs['var']['json']\n params = kwargs['params']\n print(f\"ti: {ti}\")\n print(f\"dag_run: {dag_run}\")\n\n ### Get exception then parsing it\n if kwargs.get('exception') is not None and type(kwargs.get('exception')) == list:\n dh_excpt = \"During handling of the above exception, another exception occurred:\"\n matching_main = [s for s in kwargs['exception'] if \"/main.py\" in s]\n print(\"matching_main >>>> \", matching_main)\n \n if matching_main != []:\n matching_fist_text = matching_main[0]\n print(\"matching_fist_text >>>> \", matching_fist_text)\n matching_fist_index = kwargs['exception'].index(matching_fist_text)\n print(\"matching_fist_index >>>> \", matching_fist_index)\n\n matching_last_text = matching_main[-1]\n print(\"matching_last_text >>>> \", matching_last_text)\n matching_last_index = kwargs['exception'].index(matching_last_text)\n 
print(\"matching_last_index >>>> \", matching_last_index)\n\n if dh_excpt in kwargs['exception']:\n dhe_index = kwargs['exception'].index(dh_excpt)\n print(\"The index of dhe >>>> \", dhe_index)\n\n if matching_fist_index < dhe_index:\n # when \"/main.py\" first show before \"During handling...\" then remove after \"During handling...\" text until the end\n kwargs['exception'][dhe_index:] = []\n elif matching_fist_index > dhe_index:\n # when \"/main.py\" first show after \"During handling...\" then remove after another text until the end\n kwargs['exception'][matching_last_index+2:] = []\n\n formatted_exception = \"\\n\".join(kwargs['exception'])\n print(f\"formatted_exception: {formatted_exception}\")\n elif kwargs.get('exception') is not None: \n formatted_exception = kwargs['exception']\n print(f\"formatted_exception: {formatted_exception}\")\n\n title = ''\n body = ''\n print(\"dag_run.run_id >>>> \", dag_run.run_id)\n print(\"ti.task_id >>>> \", ti.task_id)\n print(\"ti.state >>>> \", ti.state)\n\n print(\"When ti.state == State.FAILED >>>> \") # ti.state == State.FAILED as same as ti.state == 'failed'\n title = f\"[TEST] Airflow alert: ({dag_run.run_id}) failed on ({ti.task_id})\"\n body = f\"Dears, \\n\\n\\n\" + \\\n f\"The job_id ({dag_run.run_id}) failed on ({ti.task_id}). \\n\" + \\\n f\"Check what goes wrong, the ERROR message is shown as below: \\n\\n\" + \\\n f\"{formatted_exception} \\n\\n\" + \\\n f\"Forever yours, \\n\" + \\\n f\"RDP Data Team\"\n print(\"check title >>>> \\n\", title)\n print(\"check body >>>> \\n\", body)\n print(f\"Prepare to send out the mail...\\n\\t\\tsubject: {title}\") \n se.email(emailfrom, emailsto, emailscc, username, password, server, body, subject = title)\n print(\"The email send out done.\")\n raise AirflowException(f\"AirflowException: Pleaes check what goes wrong this job_id ({dag_run.run_id}) failed on ({ti.task_id}).\")", "def generate_email(mail, env):\n race, results, standings = get_last_results_and_standings()\n next_race = get_next_race()\n\n subject = f\"Race digest - F1 2021 | Round {race.round} | {race.name}\"\n body = (f\"Results:\\n{results}\\n\\nCurrent standings:\\n\"\n f\"{standings}\\n\\nNext race: {next_race}\")\n\n login_info = env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']\n\n subs = update_db_and_get_subs(mail, (env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']))\n\n for sub in subs:\n send_email(subject, body, sub, login_info)", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nThe import for user {username} has failed to import. 
The path to the import\r\nis:\r\n\r\n{file_path}\r\n\r\nError:\r\n\r\n{exc}\r\n\r\n\"\"\".format(**message_data)\r\n return msg", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def create_email_body_for_update_resources(results):\n\n failures = [url for url in results if results[url][\"state\"] == \"Failed\"]\n warnings = [url for url in results if results[url][\"state\"] == \"Warning\"]\n successes = [url for url in results if results[url][\"state\"] == \"Succeeded\"]\n\n body, html_content = \"\", \"\"\n\n # Failed\n if failures:\n body += f\"Failed [{len(failures)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(failures)}]</h2>\\n\\n\"\n\n for url in failures:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: red;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Warnings\n if warnings:\n body += f\"Warnings [{len(warnings)}]\\n\\n\"\n html_content += f\"<h2>Failed [{len(warnings)}]</h2>\\n\\n\"\n\n for url in warnings:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: orange;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n # Succeeded\n if successes:\n body += f\"Succeeded [{len(successes)}]\\n\\n\"\n html_content += f\"<h2>Succeeded [{len(successes)}]</h2>\\n\"\n\n for url in successes:\n result = results[url]\n\n body += f\"Resource: {url}\\n\"\n html_content += f'<h3 style=\"color: green;\">Resource: {url}</h3>\\n'\n\n html_content += \"<ul>\\n\"\n for message in result[\"messages\"]:\n body += f\" {message}\\n\"\n html_content += f\"<li>{message}</li>\\n\"\n html_content += \"</ul>\\n\"\n body += \"\\n\\n\"\n\n body_html = f\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Updated BEL Resources for {settings.HOST_NAME}</title>\n </head>\n <body>\n <div id=\"content\">{html_content}</div>\n </body>\n</html>\n \"\"\"\n\n return (body, body_html)", "def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; 
border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way&mdash;write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 
2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def task_rescheduled_notify(name, attempts, last_error, date_time, task_name, task_params):\n body = loader.render_to_string(\n 'notification/email/notify_rescheduled_task.html', {\n 'name': name,\n 'attempts': attempts,\n 'last_error': last_error,\n 'date_time': date_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'task_name': task_name,\n 'task_params': task_params,\n 'signature': settings.EMAIL_SIGNATURE\n })\n subject = name + \" has been rescheduled\"\n mail_admins(subject, body, settings.DEFAULT_FROM_EMAIL)", "def make_email_message(itrf_begin, epoch_begin, itrf_final, epoch_final, velocity, date):\n\n message = \"Estimado Usuario,\\n\\nEn adjunto encontrará los resultados de la transformacion ITRF de acuerdo a la siguiente configuración:\\n\\nITRF inicial: \"+str(itrf_begin)+\"\\nEpoca inicial: \"+str(epoch_begin)+\"\\nITRF final: \"+str(itrf_final)+\"\\nEpoca final: 
\"+str(epoch_final)+\"\\nModelo de velocidad: \"+velocity+\"\\nFecha de la solicitud de la transformación: \"+date+\"\\n\\n\\nSaludos Cordiales,\\n\\nEquipo de Geodesia del IGVSB.\"\n return message", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "def template_message(include_title=False, template='markdown.md.j2', exclude_labels=True, current_length=0, **kwargs):\n processed = {'message': ''}\n alerts_count = len(kwargs['alerts'])\n title = f\"{alerts_count} alert(s) received\"\n if not include_title:\n processed.update({'title': f\"{title}\"})\n title = None\n processed['message'] = render_template(\n template,\n title=title,\n alerts=kwargs['alerts'],\n external_url=kwargs['external_url'],\n receiver=kwargs['receiver'],\n exclude_labels=exclude_labels,\n current_length=current_length,\n )\n for alert in kwargs['alerts']:\n if int(alert['annotations'].get('priority', -1)) > processed.get('priority', -1):\n processed['priority'] = int(alert['annotations']['priority'])\n return processed", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def email_body_meeting_rejected_notification_to_seller(meeting, buyer_name, buyer_prof_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr>\\n\\t\\t<td align=\"center\" valign=\"top\">\\n\\t\\t</td>\\n\\t</tr>\\n</tbody>\\n</table>\\n\\n'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody>'\n\tmsg = msg + '\\n\\t<tr>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + 
'\\n\\t\\t\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tYou did not accept a proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=\\\"' + buyer_prof_id + ' style=\"color:#1488CC\">' + buyer_name + '</a>.<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tMessage <a href=\"#\" style=\"color:#1488CC\">' + buyer_name + '</a> to see if you can work our a new date and time.'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t</tr>'\n\tmsg = msg + '\\n</tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def generate_selfservice_notice_email(context):\n subject = \"Self Service Form Submission\"\n from_email = settings.DEFAULT_FROM_ADDR\n to_email = [settings.EMAIL_TARGET_W, settings.EMAIL_TARGET_VP]\n\n cont_html = render_to_string('emails/email_selfservice.html', context)\n cont_text = render_to_string('emails/email_selfservice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, to_email)\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def get_mail( traceback ):\n msg = MIMEText( traceback )\n msg[ 'Subject' ] = Header( 'FX daily cron error' )\n 
msg[ 'From' ] = 'FX daily cron'\n msg[ 'To' ] = 'tamakoshihiroki@gmail.com'\n msg[ 'Date' ] = formatdate( localtime = 9 )\n msg[ 'Content-Type' ] = ''.join(\n [ 'text/plain; charset=\"', BODY_ENCODING, '\"', ] )\n return msg", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def send_ctr_alert(date, ctr):\n sender = \"team1_rs@outlook.com\"\n receivers = [\"alexa.hernandez@mail.mcgill.ca\"]\n msg = MIMEText(\n f\"Hello Team1,\\n\\nToday's CTR has dropped below {str(MIN_CTR*100)}%. The CTR is {str(ctr*100)}%.\\nPlease \"\n f\"investigate immediately.\"\n )\n\n msg[\"Subject\"] = \"Team1 Recommendation Service - CTR Alert\"\n msg[\"From\"] = sender\n msg[\"To\"] = \";\".join(receivers)\n\n try:\n smtpObj = smtplib.SMTP(\"smtp.office365.com\", 587)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(\"team1_rs@outlook.com\", \"team1*rs\")\n smtpObj.sendmail(sender, receivers, msg.as_string())\n print(\"Successfully sent email\")\n except smtplib.SMTPException as e:\n print(\"Error: unable to send email\")", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour bookmark import is complete! We've begun processing your bookmarks to\r\nload their page contents and fulltext index them. This process might take a\r\nwhile if you have a large number of bookmarks. 
Check out your imported\r\nbookmarks at https://bmark.us/{username}/recent.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def send_alert_email(email: str, username: str, attribute: str,\n value: Union[int,float], recorded_date: datetime,\n sensor_id: str, threshold: Union[int, float], \n alert_description: str) -> bool:\n\n sensor = Sensor.get_by_id(sensor_id)\n sensor_lat, sensor_lon = None, None\n if sensor:\n loc = Location.get_by_id(sensor.l_id)\n if loc:\n sensor_lat = loc.lat\n sensor_lon = loc.lon\n else:\n logger.info(\"Could not include sensor information in alert \"\n \"email as sensor with ID {} does not \"\n \"exist\".format(sensor_id))\n\n if alert_description == \"exceeded\":\n diff = value - threshold\n else:\n diff = threshold - value\n\n text_version = GetConfig.configure('alert', 'text_template').format(\n username=username, attribute=attribute, threshold=threshold,\n sensor_id=sensor_id, lat=sensor_lat, lon=sensor_lon,value=value,\n recorded_date=recorded_date, verb=alert_description, diff=diff)\n\n html_version = flask.render_template(\n GetConfig.configure('alert', 'html_template'), username=username,\n attribute=attribute, threshold=threshold, sensor_id=sensor_id,\n lat=sensor_lat, lon=sensor_lon,value=value,\n recorded_date=recorded_date, verb=alert_description, diff=diff)\n\n sg = sendgrid.SendGridAPIClient(apikey=GetConfig.configure('sendgrid',\n 'api_key'))\n\n from_email = Email(GetConfig.configure('alert', 'sender_email'))\n to_email = Email(email)\n subject = GetConfig.configure('alert', 'email_subject')\n content_text = Content(\"text/plain\", text_version)\n send_alert = Mail(from_email, subject, to_email, content_text)\n content_html = Content(\"text/html\", html_version)\n send_alert.add_content(content_html)\n\n try:\n email_response = sg.client.mail.send.post(\n request_body=send_alert.get())\n logger.info(\"Sent alert email to {} with \"\n \"response code : {}\".format(email,\n email_response.status_code))\n return True\n except http.client.IncompleteRead as e:\n logger.error(\"Sendgrid API Key may not be set correctly or be \"\n \"invalid\", e)\n return False", "def get_last_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Your trial is over!\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nWe hope you liked your Foojal.com trial and that you will join us for a full year for only $24.00.\n\nTo get a full year subscription to the best online photo food journal, go to your account page at http://app.foojal.com/account.\n\nIf you have any questions, please email us; we would love to talk with you.\n\nThank you, Kathy and Adam\n\n\"\"\"\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % account.nickname\n return message", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nPlease click the link below to activate your account.\r\n\r\n{0}\r\n\r\nWe currently support importing from Google Bookmarks and Delicious exports.\r\nImporting from a Chrome or Firefox export does work, however it reads the\r\nfolder names in as tags. 
So be aware of that.\r\n\r\nGet the Chrome extension from the Chrome web store:\r\nhttps://chrome.google.com/webstore/detail/knnbmilfpmbmlglpeemajjkelcbaaega\r\n\r\nIf you have any issues feel free to join #bookie on freenode.net or report\r\nthe issue or idea on https://github.com/bookieio/Bookie/issues.\r\n\r\nWe also encourage you to sign up for our mailing list at:\r\nhttps://groups.google.com/forum/#!forum/bookie_bookmarks\r\n\r\nand our Twitter account:\r\nhttp://twitter.com/BookieBmarks\r\n\r\nBookie is open source. Check out the source at:\r\nhttps://github.com/bookieio/Bookie\r\n\r\n---\r\nThe Bookie Team\"\"\".format(message_data)", "def email_body_meeting_rejected_notification_to_buyer(meeting, sellr_name):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr>\\n\\t\\t<td align=\"center\" valign=\"top\"></td>\\n\\t</tr>\\n</tbody>\\n</table>\\n\\n'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody>'\n\tmsg = msg + '\\n\\t<tr>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t\\t<tbody>'\n\n\tmsg = msg + '\\n\\t\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '\\n\\t\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\n\\t\\t\\t<tr>'\n\tmsg = msg + '\\n\\t\\t\\t\\t<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\t' + sellr_name + ' didn\\'t accept your proposal this time around.<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tWhy, you ask? There could be many reasons, but trust us, don\\'t take it personally. <br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t\\tNeed to edit, manage or update the appointment? 
Go for it, or follow up with ' + sellr_name + '.'\n\tmsg = msg + '\\n\\t\\t\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\t</td>'\n\tmsg = msg + '\\n\\t\\t\\t</tr>'\n\tmsg = msg + '\\n\\t\\t</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_email(geocentric_coordinates_transformated_to_ITRF_final_list, data):\n pandas.read_json(json.dumps(geocentric_coordinates_transformated_to_ITRF_final_list)).to_excel(\n data_output + \"/\" + data['filename'] + \"_results.xlsx\")\n msg = Message('ITRF Transformations', sender=app.config['MAIL_USERNAME'], recipients=[data['email']])\n msg.body = make_email_message(data['itrf_begin'], data['epoch_begin'], data['itrf_final'], data['epoch_final'],\n data['velocity'], data['date'])\n with app.open_resource(data_output + \"/\" + data['filename'] + \"_results.xlsx\") as fp:\n file_name = data['filename'] + \"_results\"\n msg.attach(file_name + \".xlsx\", file_name + \"/xlsx\", fp.read())\n mail.send(msg)", "def task_failed_notify(name, attempts, last_error, date_time, task_name, task_params):\n body = loader.render_to_string(\n 'notification/email/notify_failed_task.html', {\n 'name': name,\n 'attempts': attempts,\n 'last_error': last_error,\n 'date_time': date_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'task_name': task_name,\n 'task_params': task_params,\n 'signature': settings.EMAIL_SIGNATURE\n })\n subject = name + \" has failed\"\n mail_admins(subject, body, settings.DEFAULT_FROM_EMAIL)", "def get_first_trial_communication_email(account):\n\n SUBJECT = 'Foojal: First couple of days'\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your first few days of Foojal.com.\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nYour Team:\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour import has failed. The error is listed below. Please file a bug at\r\nhttps://github.com/bookieio/bookie/issues if this error continues. 
You may\r\nalso join #bookie on freenode irc if you wish to aid in debugging the issue.\r\nIf the error pertains to a specific bookmark in your import file you might try\r\nremoving it and importing the file again.\r\n\r\nError\r\n----------\r\n\r\n{exc}\r\n\r\nA copy of this error has been logged and will be looked at.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)", "def notification(self, approver_list):\n dns_name = axops_client.get_dns()\n job_id = self.root_id\n url_to_ui = 'https://{}/app/jobs/job-details/{}'.format(dns_name, job_id)\n service = axops_client.get_service(job_id)\n\n html_payload = \"\"\"\n<html>\n<body>\n <table class=\"email-container\" style=\"font-size: 14px;color: #333;font-family: arial;\">\n <tr>\n <td class=\"msg-content\" style=\"padding: 20px 0px;\">\n The {} job is waiting for your approval. The job was triggered by {}.\n </td>\n </tr>\n <tr>\n <td class=\"commit-details\" style=\"padding: 20px 0px;\">\n <table cellspacing=\"0\" style=\"border-left: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;border-top: 1px solid #e3e3e3;\">\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Author</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Repo</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Branch</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Description</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Revision</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n </table>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">VIEW JOB</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">VIEW JOB</a></div>\n </td>\n 
</tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">APPROVE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">APPROVE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">DECLINE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">DECLINE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"thank-you\" style=\"padding-top: 20px;line-height: 22px;\">\n Thanks,<br>\n Argo Project\n </td>\n </tr>\n </table>\n</body>\n</html>\n\"\"\"\n\n for user in approver_list:\n\n approve_token, decline_token = self.generate_token(user=user, dns_name=dns_name)\n\n approve_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, approve_token)\n decline_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, decline_token)\n\n msg = {\n 'to': [user],\n 'subject': 'The {} job requires your approval to proceed'.format(service['name']),\n 'body': html_payload.format(service['name'], service['user'],\n service['commit']['author'], service['commit']['repo'],\n service['commit']['branch'], service['commit']['description'], service['commit']['revision'],\n url_to_ui, url_to_ui, approve_link, approve_link, decline_link, decline_link),\n 'html': True\n }\n\n if service['user'] != 'system':\n try:\n user_result = axops_client.get_user(service['user'])\n msg['display_name'] = \"{} {}\".format(user_result['first_name'], user_result['last_name'])\n except Exception as exc:\n logger.error(\"Fail to get user %s\", str(exc))\n\n logger.info('Sending approval requests to %s', str(user))\n result = axsys_client.send_notification(msg)\n\n # TODO: Tianhe adding retry mechanism\n if result.status_code != 200:\n logger.error('Cannot send approval request, %s', result.content)\n sys.exit(1)\n logger.info('Successfully sent approval requests to reviewers.')", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked 
below.\\n\\n\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "def composeSummaryEmail(self):\r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: boost@lists.boost.org\r\nReply-To: boost@lists.boost.org\r\nSubject: [Report] \"\"\"\r\n message += str(self.numFailures()) + \" failures on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nBoost regression test failures\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists all regression test failures on high-priority platforms.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n\\n'\r\n\r\n if self.numFailures() == 0:\r\n message += \"No failures! Yay!\\n\"\r\n return message\r\n \r\n # List the platforms that are broken\r\n any_broken_platforms = self.numReportableFailures() < self.numFailures()\r\n if any_broken_platforms:\r\n message += \"\"\"The following platforms have a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n message += \"\"\"\r\nFailures on these \"broken\" platforms will be omitted from the results below.\r\nPlease see the full report for information about these failures.\r\n\r\n\"\"\"\r\n \r\n # Display the number of failures\r\n message += (str(self.numReportableFailures()) + ' failures in ' + \r\n str(len(self.libraries)) + ' libraries')\r\n if any_broken_platforms:\r\n message += (' (plus ' + str(self.numFailures() - self.numReportableFailures())\r\n + ' from broken platforms)')\r\n \r\n message += '\\n'\r\n\r\n # Display the number of failures per library\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n num_failures = library.numFailures()\r\n message += ' ' + library.name + ' ('\r\n \r\n if library.numReportableFailures() > 0:\r\n message += (str(library.numReportableFailures())\r\n + \" failures\")\r\n \r\n if library.numReportableFailures() < num_failures:\r\n if library.numReportableFailures() > 0:\r\n message += ', plus '\r\n \r\n message += (str(num_failures-library.numReportableFailures()) \r\n + ' failures on broken platforms')\r\n message += ')\\n'\r\n pass\r\n\r\n message += '\\n'\r\n\r\n # Provide the details for the failures in each library.\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n if library.numReportableFailures() > 0:\r\n message += '\\n|' + library.name + '|\\n'\r\n for test in library.tests:\r\n if test.numReportableFailures() > 0:\r\n message 
+= ' ' + test.name + ':'\r\n for failure in test.failures:\r\n platform = failure.platform\r\n if not platform.isBroken():\r\n message += ' ' + platform.name\r\n message += '\\n'\r\n\r\n return message", "def get_second_trial_communication_email(account):\n\n SUBJECT = \"Foojal: Don't lose out.\"\n EMAIL_CONTENT = \"\"\"\n\nHello %s\n\nJust checking to see how you are liking your Foojal.com trial subscription.\n\nSign up today for a full year of Foojal.com for only $24.00 a year before we increase the price.\nThat's only $2.00 a month.\n\nIf you have any questions during your trial period, please email us; we would\nlove to talk with you.\n\nThank you, Kathy and Adam\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = settings.SITE_EMAIL\n message.to = account.user.email()\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % (account.nickname, settings.SITE_EMAIL)\n return message", "def email_out(name_of_updater = \"default_yorai\", remaining_string = \"#/#\", story_text = \"\"):\n # Get the authors to email map from json file\n with open(\"last_and_story.json\") as jsonfile:\n # Get the authors of the file.\n data = json.load(jsonfile)\n authors_to_emails = data[\"authors\"]\n # story = data['story']\n # remaining = data['remaining']\n\n gmail_user = email_addr\n sent_from = email_addr\n subject = \"Storyline Update!\"\n\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n server.login(email_addr, password)\n\n for name, to in authors_to_emails.items():\n body = \"Hello \"+name+ \" the Hooman!\\n\\nThe author \" + str(name_of_updater) + \" has posted an update to your shared story!\\n\"\n body += \"There are \" + remaining_string + \" lines remaining.\\n\\n\"\n if story_text != \"\":\n body += \"Story is DONE! Here it is:\\n\" + story_text + \"\\n\\n\"\n \n \n \n body += \"\\nCheck it out here:\\n http://scripts.mit.edu/~yorai/storyline/\\n\\n\"\n body+= \"May you be forgiven for your sins,\\nThe Storyline team.\\n\\n\"\n body += \"[https://media.giphy.com/media/IcifS1qG3YFlS/giphy.gif]\"\n \n \n email_text = \"\"\n email_text += 'From: %s\\n' % gmail_user\n email_text += 'To: %s\\n' % ','.join([to])\n email_text += 'Subject: %s\\n\\n' % subject\n email_text += body\n \n server.sendmail(sent_from, to, email_text)\n\n server.close()", "def mail_send():\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n with open(f\"{report_file_path}/subject\", \"rb\") as subject_handler:\n subject = pickle.load(subject_handler)\n with open(f\"{report_file_path}/{'recipient'}\", \"rb\") as recipient_handler:\n recipient = pickle.load(recipient_handler)\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n try:\n if os.path.isfile(f\"{report_file_path}/mail_report.html\"):\n os.popen(\n f\"ssh -i {Common.get_config_value('build_server_pemfile')} \"\n f\"-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\"\n f\" root@{Common.get_config_value('build_server_hostname')}\"\n f\" {Common.get_config_value('mail_script_location')}/\"\n f\"{Common.get_config_value('mail_script_name')} \"\n f\"{subject} {recipient}\"\n )\n Common.logger.info(\"Mail send successfully\")\n except Exception as ex:\n Common.logger.warning(f\"Mail sent failed due to exception: {ex}\")", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nHello {username}:\r\n\r\nPlease activate your Bookie account by clicking on the following url:\r\n\r\n{url}\r\n\r\n---\r\nThe Bookie 
Team\"\"\".format(**message_data)\r\n # lookup = config['pylons.app_globals'].mako_lookup\r\n # template = lookup.get_template(template_file)\r\n\r\n # # template vars are a combo of the obj dict and the extra dict\r\n # template_vars = {'data': message_data}\r\n # return template.render(**template_vars)\r", "def email_body_cancellation_from_buyer_within_24_hours(sellr_name, cost):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> You cancelled the appointment with <a href=\"#\" style=\"color:#1488CC\">' + sellr_name + '</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t We know life can be busy, but we also value accountability within the community and adhere to a <a href=\"#\" style=\"color:#1488CC\">24-hour cancellation policy</a>. You will be charged <a href=\"#\" style=\"color:#1488CC\">$' + str(cost) + '</a> for the service. <br><br>'\n\tmsg = msg + '\\t\\t\\t Questions? <a href=\"#\" style=\"color:#1488CC\">Drop us a line</a> or read our <a href=\"#\" style=\"color:#1488CC\">Terms of Service</a> and <a href=\"#\" style=\"color:#1488CC\">cancellation policies</a> for additional information. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def _set_email(settings, excel):\n weekend = excel.week_end()\n week_start = excel.week_start()\n dest = [str(settings.user), str(settings.dest_addr)]\n msg = MIMEMultipart('alternative')\n msg['Subject'] = 'Time sheet'\n msg['From'] = settings.user\n msg['To'] = \", \".join(dest)\n\n body = \"\"\"Howdy,\\n\\nHere is my time sheet from %s to %s. Have a good weekend!\\n\\nThanks,\\n\\n%s\"\"\" %\\\n (week_start, weekend, settings.name)\n\n derp = MIMEText(body, 'plain')\n msg.attach(derp)\n\n attach = MIMEBase('application', \"octet-stream\")\n attach.set_payload(open(settings.active_xl, \"rb\").read())\n encoders.encode_base64(attach)\n attach.add_header('Content-Disposition', 'attachment; filename=\"Timesheet ' + settings.name + '.xlsx\"')\n\n msg.attach(attach)\n\n return msg", "def send_weekly_report_slack():\n quarterly_text = get_report_text(90)\n annual_text = get_report_text(365)\n text = (\n \"*Monthly Metrics Report*\\n\"\n \"This is an automated monthly report on some of our key metrics.\\n\\n\"\n f\"\\tIn the last 90 days we saw:\\n\\n{quarterly_text}\\n\\n\"\n f\"\\tIn the last 365 days we saw:\\n\\n{annual_text}\\n\\n\"\n f\"See more details at our submissions/outcomes <{PUBLIC_REPORTING_URL}|dashboard>.\"\n )\n send_slack_message(settings.SLACK_MESSAGE.WEEKLY_REPORT, text)", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = 
templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" philipp.grunewald@ouce.ox.ac.uk < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b meter@energy.ox.ac.uk < ' + emailFilePath, shell=True)", "def sendNotification(self):\n if not(self.errors or self.accounting):\n return S_OK()\n\n emailBody = \"\"\n rows = []\n for instanceName, val in self.accounting.iteritems():\n rows.append([[instanceName],\n [val.get('Treatment', 'No Treatment')],\n [str(val.get('LogAge', 'Not Relevant'))]])\n\n if rows:\n columns = [\"Instance\", \"Treatment\", \"Log File Age (Minutes)\"]\n emailBody += printTable(columns, rows, printOut=False, numbering=False, columnSeparator=' | ')\n\n if self.errors:\n emailBody += \"\\n\\nErrors:\"\n emailBody += \"\\n\".join(self.errors)\n\n self.log.notice(\"Sending Email:\\n\" + emailBody)\n for address in self.addressTo:\n res = self.nClient.sendMail(address, self.emailSubject, emailBody, self.addressFrom, localAttempt=False)\n if not res['OK']:\n self.log.error(\"Failure to send Email notification to \", address)\n continue\n\n self.errors = []\n self.accounting.clear()\n\n return S_OK()", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment):\n html = \"\"\"\n <ul>\n <li>\n <strong>Case {case_name}</strong>: <a href=\"{url}\">{display_name}</a>\n </li>\n <li><strong>Variant type</strong>: {category} ({subcategory})\n <li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>\n <li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>\n <li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>\n <li><strong>Gene panels</strong>: {panels}</li>\n <li><strong>GT call</strong></li>\n {gtcalls}\n <li><strong>Amino acid changes</strong></li>\n {tx_changes}\n <li><strong>Comment</strong>: {comment}</li>\n <li><strong>Ordered by</strong>: {name}</li>\n </ul>\n \"\"\".format(\n case_name=case_name,\n url=url,\n display_name=display_name,\n category=category,\n subcategory=subcategory,\n breakpoint_1=breakpoint_1,\n breakpoint_2=breakpoint_2,\n hgnc_symbol=hgnc_symbol,\n panels=panels,\n gtcalls=gtcalls,\n tx_changes=tx_changes,\n name=name,\n comment=comment)\n\n return html", "def send_email(msg_body: str, run_path: str, experiment_name: str, run_number: int) -> None:\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(\"kafkabot9000@gmail.com\", \"thisisnotimportant\")\n\n msg = MIMEMultipart()\n\n files = [f\"{run_path}/cumulative_reward_{experiment_name}_R{run_number}_plot.png\",\n f\"{run_path}/max_reward_{experiment_name}_R{run_number}_plot.png\"]\n\n msg_body 
+= \"\\n\"\n msg_body += time.asctime(time.localtime(time.time()))\n msg.attach(MIMEText(msg_body))\n\n for f in files:\n with open(f, \"rb\") as fil:\n part = MIMEApplication(\n fil.read(),\n Name=basename(f)\n )\n # After the file is closed\n part['Content-Disposition'] = 'attachment; filename=\"%s\"' % basename(f)\n msg.attach(part)\n\n server.sendmail(\"kafkabot9000@gmail.com\", \"baytemiz@ucsc.edu\", msg.as_string())\n server.quit()", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else:\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. 
\" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def create_mail_content(daily: bool = False):\n if not daily:\n order = STATE['order'] if STATE['order'] else get_closed_order()\n trade_part = create_report_part_trade(order)\n performance_part = create_report_part_performance(daily)\n advice_part = create_report_part_advice()\n settings_part = create_report_part_settings()\n general_part = create_mail_part_general()\n\n if not daily:\n trade = [\"Last trade\", \"----------\", '\\n'.join(trade_part['mail']), '\\n\\n']\n performance = [\"Performance\", \"-----------\",\n '\\n'.join(performance_part['mail']) + '\\n* (change within 24 hours)', '\\n\\n']\n advice = [\"Assessment / advice\", \"-------------------\", '\\n'.join(advice_part['mail']), '\\n\\n']\n settings = [\"Your settings\", \"-------------\", '\\n'.join(settings_part['mail']), '\\n\\n']\n general = [\"General\", \"-------\", '\\n'.join(general_part), '\\n\\n']\n\n bcs_url = 'https://bitcoin-schweiz.ch/bot/'\n text = '' if daily else '\\n'.join(trade)\n\n if not CONF.info:\n text += '\\n'.join(performance) + '\\n'.join(advice) + '\\n'.join(settings) + '\\n'.join(general) + bcs_url + '\\n'\n else:\n text += '\\n'.join(performance) + '\\n'.join(advice) + '\\n'.join(settings) + '\\n'.join(general) + CONF.info \\\n + '\\n\\n' + bcs_url + '\\n'\n\n csv = None if not daily else INSTANCE + ';' + str(datetime.datetime.utcnow().replace(microsecond=0)) + ' UTC;' + \\\n (';'.join(performance_part['csv']) + ';' + ';'.join(advice_part['csv']) + ';' +\n ';'.join(settings_part['csv']) + ';' + CONF.info + '\\n')\n\n 
return {'text': text, 'csv': csv}", "def send_assignee_emails(self):\n\n assignees = list(set([obj.assignee for obj in self.stalled_nf_issues])) # Assignees from New Features\n assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues]))) # Add assignees from Sub-tasks\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n\n for assignee in assignees:\n assignee_issues = [] # List of IssueClass objects\n # Get all stalled New feature issues for this assignee\n for item in self.stalled_nf_issues + self.stalled_st_issues:\n if item.assignee == assignee:\n# if item.assignee == \"ashih\":\n assignee_issues.append(item)\n assignee_email = item.assignee_email\n \n if len(assignee_issues):\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_time_in_status_rows(assignee_issues)\n html_table += '</table>' # Closing table tag\n #recipients.append(assignee_email)\n print \"Sending email to: %s\" % recipients\n self.send_email(recipients, html_table, assignee)", "def send_mail_to_student(student, param_dict):\r\n\r\n # add some helpers and microconfig subsitutions\r\n if 'course' in param_dict:\r\n param_dict['course_name'] = param_dict['course'].display_name_with_default\r\n\r\n param_dict['site_name'] = microsite.get_value(\r\n 'SITE_NAME',\r\n param_dict['site_name']\r\n )\r\n\r\n subject = None\r\n message = None\r\n\r\n # see if we are running in a microsite and that there is an\r\n # activation email template definition available as configuration, if so, then render that\r\n message_type = param_dict['message']\r\n\r\n email_template_dict = {\r\n 'allowed_enroll': (\r\n 'emails/enroll_email_allowedsubject.txt',\r\n 'emails/enroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_enroll': (\r\n 'emails/enroll_email_enrolledsubject.txt',\r\n 'emails/enroll_email_enrolledmessage.txt'\r\n ),\r\n 'allowed_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_allowedmessage.txt'\r\n ),\r\n 'enrolled_unenroll': (\r\n 'emails/unenroll_email_subject.txt',\r\n 'emails/unenroll_email_enrolledmessage.txt'\r\n ),\r\n 'add_beta_tester': (\r\n 'emails/add_beta_tester_email_subject.txt',\r\n 'emails/add_beta_tester_email_message.txt'\r\n ),\r\n 'remove_beta_tester': (\r\n 'emails/remove_beta_tester_email_subject.txt',\r\n 'emails/remove_beta_tester_email_message.txt'\r\n ),\r\n }\r\n\r\n subject_template, message_template = email_template_dict.get(message_type, (None, None))\r\n if subject_template is not None and message_template is not None:\r\n subject = render_to_string(subject_template, param_dict)\r\n message = render_to_string(message_template, param_dict)\r\n\r\n if subject and message:\r\n # Remove leading and trailing whitespace from body\r\n message = message.strip()\r\n\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message, from_address, [student], fail_silently=False)", "def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals 
receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)", "def send_mail():\n msg = MIMEMultipart()\n msg[\"From\"] = \"SIRP-Reminders@company.com\"\n msg[\"To\"] = SENT_TO\n msg[\"Subject\"] = \"The Hive Case Metrics\"\n msg.attach(MIMEText(\"Attached are the requested case metrics in .XLSX format.\"))\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(open(\"Hive Metrics.xlsx\", \"rb\").read())\n encoders.encode_base64(part)\n part.add_header(\"Content-Disposition\", 'attachment; filename=\"Hive Metrics.xlsx\"')\n msg.attach(part)\n smtp = smtplib.SMTP(SMTP_SERVER)\n smtp.starttls()\n smtp.sendmail(msg[\"From\"], msg[\"To\"].split(\",\"), msg.as_string())\n smtp.quit()", "def ph_notify(reading, reservoir_num):\n if reading >= 8:\n _pH = 'The water in reservoir: ' + reservoir_num + ' is Basic! With a \\\n reading of: ' + reading + '\\n'\n elif reading <= 6:\n _pH = 'The water in reservoir: ' + reservoir_num + ' is Acidic! With a \\\n reading of: ' + reading + '\\n' \n else:\n _pH = 'The water in reservoir: ' + reservoir_num + ' is Neutral! With \\\n a reading of: ' + reading + '\\n' \n notification_to_file('ph_tests_logs.txt', _pH)", "def send_email(client, smtp, server_list):\n content = create_body_html(client, server_list)\n\n # Email Configuration\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = '[Cyberwatch] Servers recovered from \"Communication failure\" report - ' + \\\n date.today().strftime(\"%m/%d/%y\")\n message[\"From\"] = smtp[\"sender\"]\n message[\"To\"] = \", \".join(EMAIL_RECEIVERS)\n\n # Get Period start date with \"Last Modified\" time of file\n start_date = datetime.fromtimestamp(os.path.getmtime(os.path.dirname(\n __file__) + '/communication_failure_list.txt')).strftime(\"%d/%m/%Y, %H:%M\")\n\n email_body = f\"\"\"\\\n <p>Greetings,</p>\n\n <p>Please find in the following section, a list of servers that recovered from the status\n \"Communication failure\".</p>\n\n <span style=\"color:#4bb9f1;font-size:18px;align:center\"><strong>Servers recovered from \"Communication Failure\"\n between {start_date} and {datetime.now().strftime(\"%d/%m/%Y, %H:%M\")}</strong></span>\n <br />\n\n <br />{content}<br />\n\n <p>The Cyberwatch Team - support@cyberwatch.fr</p>\n \"\"\"\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(MIMEText(email_body, \"plain\"))\n message.attach(MIMEText(email_body, \"html\"))\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(smtp[\"server\"], smtp[\"port\"], context=context) as server:\n server.login(smtp[\"login\"], smtp[\"password\"])\n server.sendmail(\n smtp[\"sender\"], EMAIL_RECEIVERS, message.as_string()\n )\n\n print(\"Successfully sent email to {}\".format(message[\"To\"]))", "def delegate_last_day():\n\n regs = Registration.objects.all()\n\n template = 'notifications/last_day_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Schedule and other details'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def email_body_new_proposal_notification_to_seller(meeting, buyer_name, buyer_profile_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" 
width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr><td align=\"center\" valign=\"top\">\\n\\t</td></tr>\\n</tbody>\\n</table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody><tr>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" />'\n\tmsg = msg + '\\n\\t\\t\\t\\t</a>'\n\tmsg = msg + '\\n\\t\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\tGreat! You received a new proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + buyer_profile_id + '\\\" style=\"color:#1488CC\">'+ buyer_name + '</a>.'\n\tmsg = msg + '\\n\\t\\t\\t\\t<br><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDescription: ' + meeting.get_description_html() + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:150px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.accept_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Accept</a> '\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.reject_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #e55e62;font-size: 14px;border-radius: 3px;border: 1px solid #e55e62;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center\" target=\"_blank\">Reject</a> '\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg 
+ '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n</tr></tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def update_helpdesk(self, data):\n self.sr=data\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else: \n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n msg['Subject'] = 'Your Request SR# %s for VM provisioning \\\n reported failure for product %s' % \\\n\t\t\t ( self.sr['requestNumber'], pdict[self.data['groupid']] )\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve \\\n the following problem and notify infra team.\")\n nHtml.append(\"VM creation readiness from vCAC cloud \\\n is reported failure, \\\n Product is <b>%s</b> is stuck.\" \\\n % pdict[self.data['groupid']])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is impacted.<br><br>\" % pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for SR# \\\n related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: \\\n <a href=%s> Effected Build \\\n Console</a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = 
smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + \\\n msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def test_template():\n \n # Keywords and values to be filled into the template\n items = {'item_1': 'First', 'long_keyword_item_2': 'Second',\n 'space_3': 'Third Third Third ', 'item_4': 'Fourth',\n 'item_5': None}\n \n sender = 'dummy@moc.org'\n receiver = 'dummy@moc.org'\n result = 'First Second\\nThird Third Third Fourth\\n'\n \n # TEST_DIR = os.path.dirname(os.path.abspath(__file__))\n template = os.path.abspath(os.path.join(TEST_DIR, 'test_template.txt'))\n\n msg = TemplateMessage(sender=sender, email=receiver, template=template,\n **items)\n assert msg.body == result", "def alert_service_notification(user, service):\n\n message = loader.get_template(\n 'alerts/service_notification.txt').render(\n {'user': user, 'service': service})\n\n return message", "def emailcallback(pattern, line, lines, filename):\n _dum, just_the_name = os.path.split(filename)\n subject = \"{} in {}\".format(pattern, just_the_name)\n body = ''.join(lines)\n SendEmail(subject, body).start()\n logger.info(\"{} in {}\".format(pattern, filename))", "def email_body_cancellation_from_buyer_outside_24_hours(buyer_name, sellr_name):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> Shucks. You cancelled your appointment. Thanks for letting <a href=\"#\" style=\"color:#1488CC\">' + sellr_name + '</a> know ahead of time; you will not be charged for the cancellation.<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to reschedule? Go right ahead. <br><br>'\n\tmsg = msg + '\\t\\t\\t You can also explore other options, too. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #e6e6e6;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def notify(nt_id, application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))", "def __str__(self):\n email_template = '\\n'.join((f'\\n\\nDear {self._full_name},\\n',\n f'Thank you for your very kind donation of ${self.last_donation:.2f}.\\n',\n 'It will be put to very good use.\\n',\n ' Sincerely,',\n ' -The Team\\n'))\n return email_template", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def report_preparation(data):\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n fd = open(f\"{report_file_path}/mail_report.html\", \"w\")\n fd.write(\n \"\"\"\n <html>\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html charset=UTF-8\" />\n <style>\n table {\n font-family: arial, sans-serif;\n border-collapse: collapse;\n width: 100%;\n }\n\n th {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n td {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n </style>\n </head>\n\n <body>\n <p><font color=\"black\"> Hi All </font></p>\n \"\"\"\n )\n fd.write(\n \"\"\"\n <p><font color=\"black\">{}\n </font></p>\n <table>\n <thead>\n <tr>\n <th> Job Category </th>\n <th> Highlighted information/Test 
Failure</th>\n <th> Job URL </th>\n <th> Bugzilla </th>\n <th> Job Status </th>\n </tr></thead> \"\"\".format(\n data[\"body\"]\n )\n )\n data.pop(\"body\")\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n\n if os.path.isfile(f\"{report_file_path}/subject\"):\n os.remove(f\"{report_file_path}/subject\")\n if os.path.isfile(f\"{report_file_path}/recipient\"):\n os.remove(f\"{report_file_path}/recipient\")\n with open(f\"{report_file_path}/subject\", \"wb\") as handler:\n pickle.dump(data[\"subject\"], handler)\n data.pop(\"subject\")\n\n with open(f\"{report_file_path}/recipient\", \"wb\") as handler:\n pickle.dump(data[\"recipient\"], handler)\n data.pop(\"recipient\")\n for _ in data:\n fd.write(\"<tr><td>{}</td>\".format(_, data[_]))\n fd.write(\"<td>\")\n for content in data[_][\"highlighted_information\"]:\n if (content.lstrip()).rstrip():\n if re.search(r\"tests.\", f\"{content}\"):\n fd.write(\n f'<font color=red><li align=\"left\">{(content.lstrip()).rstrip()}</li></font>'\n )\n else:\n fd.write(f'<li align=\"left\">{(content.lstrip()).rstrip()}</li>')\n fd.write(\"</td>\")\n fd.write(f\"<td><a href={data[_]['Build Url']}>Job Link</a></td>\")\n fd.write(\"<td>\")\n for bz in data[_][\"bugzilla\"].split(\".\"):\n if bz.lstrip().rstrip():\n fd.write(\n f\" <a href=https://bugzilla.xyz.com/show_bug.cgi?id={bz}>{bz}</a> \"\n )\n else:\n fd.write(f\"{bz}\")\n fd.write(\"</td>\")\n if data[_][\"Build_Status\"] == \"SUCCESS\":\n color = \"green\"\n fd.write(f\"<td><font color={color}>PASSED</font></td>\")\n else:\n color = \"red\"\n fd.write(f\"<td><font color={color}>FAILED</font></td>\")\n fd.write(\n \"\"\"\n </table>\n </body>\n <p><font color=\"black\">Note: For more details</font>\n <form action=\"https://wikipage></form></p>\n <p><font color=\"black\">Thanks</font><br>\n <font color=\"black\">xyz</font><p>\n </html>\"\"\"\n )\n fd.close()\n Common.logger.info(\"Report prepared for the selected job and their type\")", "def create_messages(application, action, remedy):\n\n messages = [] \n messages.append(\"\"\"Your Resources: </br><pre style=\"margin-left: 40px\">\"\"\" + application + \"</br></pre>\" + action + \"\"\" in AWS. <strong style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\">\"\"\" + remedy +\"\"\"</strong>\n </td>\n </tr><tr style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\"><td class=\"content-block\" style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; vertical-align: top; margin: 0; padding: 0 0 20px;\" valign=\"top\">\n This message was sent to inform you of changes happening to your resources.\n <ul>\n <li>New instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.</li>\n <li>Instances without the necessary tags are notified through email and Slack.</li>\n </ul>\n If you have any further questions, please reply to this email.\"\"\")\n \n messages.append(\"Your Resources:\\n\\n\" + application + \"\\n\\n\" + action + \" in AWS. 
\" + remedy + \"\\n\" + \n (\"\\nThis message was sent to inform you of changes happening to your resources.\\n\"\n \"\\nNew instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.\"\n \"Instances without Owner Mail and Owner Team tags are notified through email and slack.\\n\"\n \"\\nIf you have any further questions, please reply to this email.\")) \n\n return messages", "def email_body_cancellation_from_buyer_within_48_hours_to_seller(buyer_name):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> Drats. <a href=\"#\" style=\"color:#1488CC\">' + buyer_name + '</a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Message <a href=\"#\" style=\"color:#1488CC\">'+buyer_name+'</a> to see if you can work out a new date and time. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def test_weekly_update_email(self):\n district_list = self._district_summary_data()\n totals = self._district_summary_totals(district_list)\n c = {'district_list': district_list, 'totals': totals,\n 'url': settings.MSPRAY_WEEKLY_DASHBOARD_UPDATE_URL}\n subject = render_to_string(\n 'alerts/emails/weekly_update_subject.txt', c).replace('\\n', '')\n text_content = render_to_string(\n 'alerts/emails/weekly_update_body.txt', c)\n html_content = render_to_string(\n 'alerts/emails/weekly_update_body.html', c).replace('\\n', '')\n memory_backend = 'django.core.mail.backends.locmem.EmailBackend'\n with self.settings(EMAIL_BACKEND=memory_backend):\n weekly_update_email(['Mosh <one@example.com>'], district_list,\n totals)\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n self.assertEqual(email.subject, subject)\n self.assertEqual(email.body, text_content)\n self.assertEqual(email.alternatives[0][0], html_content)", "def mail_sent():\n\n url = settings.SITE_URL + '\\charts'\n subject = 'Анализ запрошенного ресурса'\n message = 'Графики популярного часа дня и дня недели {}'.format(url)\n mail_sent = send_mail(subject,\n message,\n 'admin@myshop.com',\n ['user@mail.ru,'])\n print(message)\n return mail_sent", "def notify_email(subj, message, json, logger=None):\n\n fname = os.path.join(os.path.dirname(__file__), \"config_emails.conf\")\n elist = read_file_aslist(fname, logger)\n\n if logger is not None:\n logger.debug(\"\"\"\nSubject: {}\nMessage: {}\nJson: {}\nEmails: {}\n \"\"\".format(subj, message, json, elist))", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 
'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, 'no-reply@cultrtoolkit.com',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def send_application_submitted_notification(application):\n candidate_name = application.candidate_name\n if application.authorized_email is not None:\n candidate_email = application.authorized_email\n else:\n candidate_email = application.questionnaire.candidate_email\n\n group_name = application.group.name\n group_email = application.rep_email\n\n cc_emails = [\n '\"%s\" <%s>' % (candidate_name, candidate_email),\n '\"%s\" <%s>' % (\n 'Our Revolution Electoral Coordinator',\n ELECTORAL_COORDINATOR_EMAIL\n ),\n ]\n from_email = 'Our Revolution <%s>' % DEFAULT_FROM_EMAIL\n to_email = [\n # Use double quotes for group name\n '\"%s\" <%s>' % (group_name, group_email),\n ]\n\n subject = \"\"\"\n Your nomination for %s has been submitted! Here are the next steps.\n \"\"\" % candidate_name\n\n d = {\n 'or_logo_secondary': OR_LOGO_SECONDARY,\n 'group_name': group_name,\n 'candidate_name': candidate_name\n }\n\n html_template = get_template('email/application_submit_email.html')\n html_content = html_template.render(d)\n text_template = get_template('email/application_submit_email.txt')\n text_content = text_template.render(d)\n\n msg = EmailMultiAlternatives(\n subject,\n text_content,\n from_email,\n to_email,\n cc=cc_emails\n )\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def generate_web_service_email(details):\n subject = details[\"subject\"]\n body = details[\"message\"]\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = [settings.EMAIL_TARGET_W]\n to_email = details[\"email_to\"]\n\n email = GenericEmailGenerator(subject=subject, to_emails=to_email, bcc=reply_to_email, from_email=from_email,\n reply_to=reply_to_email, body=body, context={'mrkdwn': True})\n\n return email", "def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"aladinshixi@gmail.com\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"aladinshixi@gmail.com\", \"aladinshixi@gmail.com\", msg)\n\n # smtp.close()\n return False", "def format_feedback_with_evaluation(self, system, feedback):\r\n context = {'msg': feedback, 'id': \"1\", 'rows': 50, 'cols': 50}\r\n html = system.render_template('{0}/open_ended_evaluation.html'.format(self.TEMPLATE_DIR), context)\r\n return html", "def noticeEMail(starttime, usr, psw, fromaddr, toaddr, subject, jobmsg):\n\n # Calculate run time\n runtime=datetime.datetime.now() - starttime\n\n # Initialize SMTP server\n server=smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(usr, psw)\n\n # Send email\n senddate=datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n subject=subject\n m=\"Date: %s\\r\\nFrom: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nX-Mailer: My-Mail\\r\\n\\r\\n\" % (senddate, fromaddr, toaddr, subject)\n msg='{}\\nJob runtime: {}'.format(jobmsg, str(runtime))\n\n server.sendmail(fromaddr, toaddr, m+msg)\n server.quit()", "def email_body_cancellation_from_seller_to_buyer():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg 
= msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> <a href=\"#\" style=\"color:#1488CC\">{Insert user - seller}</a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Check out <a href=\"#\" style=\"color:#1488CC\">{Insert seller}</a>\\'s availability, and send a new proposal. (Sometimes, a little reshuffling can really make things happen!)</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def generate_html_mesg(info, open_quests, owner, tags):\n\n msg = '<html>' \\\n '<body style=\"font-family: Verdana; font-size: 1em; color: #000\">'\n msg += (\n \"<div style='padding: 10px; border-radius: 5px; background: #232f3e; \"\n \"color: #fff; font-weight: bold; font-size: 1.25em;'>\"\n \"Hermes Notifications\"\n \"</div>\"\n \"<div style='padding: 10px;'><p>This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.</p>\"\n \"<p>Due dates, if any, are noted with each quest.</p>\"\n \"\".format(owner)\n )\n msg += (\n \"<p>To throw an event manually, you can run the following command \"\n \"on a shell server:</p>\"\n \"<pre style='font-size: 1.2em'>$ hermes event create [event] --host \"\n \"[hostname]</pre>\"\n \"<p>Or you can visit the quests linked below.</p></div>\".format(\n 
settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"<div style='border-radius: 5px; background: #dce1e6; \"\n \"padding: 10px; margin-bottom: 10px;'>\"\n \"<span style='font-size: 1.1em; font-weight: bold'>QUEST {}</span><br/>\"\n \"<strong>CREATOR:</strong> {}<br />\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"<strong>DUE:</strong> {}<br/>\".format(quest.target_time)\n msg += \"<strong>DESC:</strong><p> \\\"{}\\\"</p>\".format(quest.description)\n msg += \"<strong>LINK:</strong> <code>{}/v1/quests/{}</code><br/>\".format(\n settings.frontend, quest_id\n )\n else:\n msg += (\n \"<div style='border-radius: 5px; background: #dce1e6; \"\n \"padding: 10px; margin-bottom: 10px;'>\"\n \"<span style='font-size: 1.1em; font-weight: bold'>Labors not \"\n \"associated with a quest:</span><br />\"\n )\n\n msg += \"<p>Machines with labors:</p>\"\n\n msg += \"<pre style='margin-left: 10px; font-size: 1.2em'>\"\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \"{} ({})\\n\".format(hostname, tags_str)\n\n msg += \"</pre></div>\"\n\n msg += \"</body>\"\n\n return msg", "def read_template():\n\n text_msg = \"\"\"${PERSON_NAME} - Calling Campaign Summary - ${DATE}:\\n\n Total Called = ${TOTAL_CALLED}\\n\n Answered = ${ANSWERED}\\n\n Not Answered = ${NOT_ANSWERED}\\n\n Declines = ${DECLINES}\\n\n Remaining = ${REMAINING}\\n\n \\n\n Thank You.\"\"\"\n\n return Template(text_msg)", "def main():\n summary = process_text()\n # TODO: turn this into a PDF report\n paragraph = \"<br/>\".join(summary)\n title = \"Processed Update on {}\".format(date.today().strftime('%B %d, %Y'))\n attachment = f'{path}/processed.pdf'\n reports.generate_report(attachment, title, paragraph)\n\n # TODO: send the PDF report as an email attachment\n sender = \"automation@example.com\"\n receiver = \"{}@example.com\".format(os.environ.get('USER'))\n subject = \"Upload Completed - Online Fruit Store\"\n body = \"All fruits are uploaded to our website successfully. 
A detailed list is attached to this email.\"\n message = emails.generate_email(sender, receiver, subject, body, attachment)\n emails.send_email(message)", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def send_welcome_mail(backend, details, response, user, is_new=False, *args, **kwargs):\n\n if is_new:\n context = Context({'user': user, 'ga_campaign_params' : 'utm_source=unishared&utm_content=v1&utm_medium=e-mail&utm_campaign=welcome_mail'})\n\n email_task.apply_async([u'Welcome on UniShared!', context, 'welcome_mail', [user.email]], eta= datetime.utcnow() + timedelta(hours=1))", "def create_email(username, provider):\n print(f\"Your new email is {username}@{provider}.com\")", "def render_message(template_name, extra_context={}):\n mail_text = _render_mail_template(template_name, extra_context)\n rendered_mail = mail_text.replace(u\"\\r\\n\", u\"\\n\").replace(u\"\\r\", u\"\\n\").split(u\"\\n\")\n return rendered_mail[0], \"\\n\".join(rendered_mail[1:])", "def email_body_appointment_confirmation_for_seller(meeting, buyer_profile, sellr_profile, msg_user_link='https://INSPRITE.co/message/USER'):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Fantastic! You accepted <a href=\"https://127.0.0.1:5000/profile?' + buyer_profile.prof_id + '\" style=\"color:#1488CC\">' + buyer_profile.prof_name + '\\'s proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details:<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"' + msg_user_link + '\" style=\"color:#1488CC\"> ' + buyer_profile.prof_name + ' a message.</a><br><br>We know life can be busy, so we\\'ll send you a reminder 24 hours in advance too.</font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\"><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"><a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a> '\n\tmsg = msg + '| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_report_email(j_id=None):\n\n if j_id is not None and os.path.exists(gTAF_config.html_updated_report):\n copy_report = \"cp \" + gTAF_config.html_updated_report + \" \" + gTAF_config.bkp_log_path + \"/gTAF_report_updated_\" + str(j_id) + \".html\"\n print(copy_report)\n os.system(copy_report)\n\n logging.info(\"Sending report email...\")\n\n # Get total number of test cases entries in table\n filer_th_tags_cmd = \"cat \" + gTAF_config.html_report_file + \" | grep '<th>[0-9]*[0-9]</th>'\"\n sr_num, src_file = [], []\n th_tags = subprocess.check_output(filer_th_tags_cmd, shell=True)\n th_tags = th_tags.strip()\n th_tags = th_tags.split(\"\\n\")\n\n\n for i in range(0, len(th_tags)):\n tmp = th_tags[i]\n numbers = re.findall('\\d+', tmp)\n index = int(numbers[0])\n sr_num.append(index + 1)\n\n # Increase cell number by 1 in html source\n f = open(gTAF_config.html_report_file, \"r\")\n for line in f:\n tag = re.findall('<th>\\d+</th>', line)\n if len(tag) != 0:\n num = re.findall('\\d+', tag[0])\n n = int(num[0])\n tmp = '<th>' + str((n + 1)) + '</th>'\n src_file.append(tmp)\n else:\n src_file.append(line)\n f.close()\n\n rm_cmd = \"rm \" + gTAF_config.html_updated_report\n os.system(rm_cmd)\n\n # Update 'FAIL' color code and add Sr. No. 
in html file and write update html source code\n for ln in src_file:\n fh = open(gTAF_config.html_updated_report, 'a')\n if '<td>FAIL</td>' in ln:\n ln = ln.replace('<td>FAIL</td>', '<td style=\"color: red\">FAIL</td>')\n if '<th></th>' in ln:\n ln = ln.replace('<th></th>', '<th>Sr No.</th>')\n fh.write(ln)\n fh.write(\"\\n\")\n fh.close()\n\n # Fetch date\n date_time = subprocess.check_output('date', shell=True)\n date_time = date_time.split(' ')\n tmp = date_time[0] + '_' + date_time[2] + '_' + date_time[1]\n\n # Read html source and send email\n with open(gTAF_config.html_updated_report) as input_file:\n html_src = input_file.read()\n msg = email.message.Message()\n msg['Subject'] = 'gTAF_Test_Automation_Report_' + tmp\n msg['From'] = gTAF_config.sender_mail\n password = gTAF_config.sender_mail_pwd\n msg.add_header('Content-Type', 'text/html')\n msg.set_payload(html_src)\n s = smtplib.SMTP(gTAF_config.smtp_server)\n s.starttls()\n for i in range(len(gTAF_config.report_mail_to_list)):\n # Login Credentials for sending the mail\n print(\"Sending email to : \", gTAF_config.report_mail_to_list[i])\n s.login(msg['From'], password)\n s.sendmail(msg['From'], gTAF_config.report_mail_to_list[i], msg.as_string())\n print('Mail Successfully Sent, Please check it !!!!!')\n s.quit()", "def format_mail(template: str, event: dict, ishtml: bool) -> str:\n header = \"Someone filled the contact form\"\n subtext = \"\"\n # uuid.uuid4().hex\n unsubscribe_key = \"f4bd5dd85908487b904ea189fb81e753\" # Not actually applicable for Admin email ID\n keys = ['firstName', 'lastName', 'email', 'subject', 'message']\n for key in keys:\n if ishtml:\n value = html.escape(event[key]).replace('\\n', '<br/>')\n subtext += \"{}: {}<br>\".format(key, value)\n else:\n subtext += \"{}: {}\\n\".format(key, event[key]).replace('\\n', '\\r\\n')\n template = template.replace('{{header}}', header)\n template = template.replace('{{subtext}}', subtext)\n template = template.replace('{{unsubscribe-key}}', unsubscribe_key)\n return template", "def get_email():\n return Email(\n subject='[Messages] Integration Test',\n body='Conducting Integration Testing',\n attachments=str(TESTDIR.joinpath('file2.png')))", "def email_body_cancellation_from_buyer_within_24_hours_to_seller(buyer_name, cost):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid 
#FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> <a href=\"#\" style=\"color:#1488CC\"> ' + buyer_name + ' </a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Sometimes things come up in life, but your time and talent are still valuable. You\\'ll receive '+ str(cost) +' from ' + buyer_name + ' for the cancelled booking.</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def generate_html_reviews_email(event, recommendations):\n pull_request_id = event['PullRequestId']\n lines_scanned = event['Metrics']['MeteredLinesOfCodeCount']\n recommendations_found = event['Metrics']['FindingsCount']\n recommendations_html_table_contents = [\n f\"\"\"\n <tr>\n <td>{recommendation['FilePath']}</td>\n <td>{recommendation['StartLine']}</td>\n <td>{recommendation['EndLine']}</td>\n <td>{recommendation['Description']}</td>\n </tr>\n \"\"\"\n for recommendation in recommendations\n ]\n recommendations_html_table = '\\n'.join(recommendations_html_table_contents)\n html_body = f\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n </head>\n <body>\n <p>Hi team,</p>\n <p>Amazon CodeGuru Reviewer has completed the review of {pull_request_id}, \n scanning {lines_scanned} and found {recommendations_found} of recommendations.</p>\n <p>See Table below for the recommendations, and review the BUSINESS LOGIC in the pull request</p>\n <table style=\"width:100%\">\n <tr>\n <th>File Path</th>\n <th>Start Line</th>\n <th>End Line</th>\n <th>Description/Recommendation</th>\n </tr>\n {recommendations_html_table}\n </table>\n </body>\n </html>\n \"\"\"\n return html_body", "def send_realtime_email(self,body_):\n import smtplib, ssl\n\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = self.fromaddr # Enter your address\n receiver_email = self.toaddr # Enter receiver address\n password = self.pswd\n message = f\"\"\"\\\nSubject: [Test] Twitter real time (half) hourly trending alert\n\n{body_}\"\"\"\n\n context = ssl.create_default_context()\n # send to multiple emails\n for receiver in receiver_email:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver, message)\n \n print(f'Email successfully sent to {receiver}')", "def 
send_warning(self):\n\n # Check whether all the necessary parameters for SMS are present\n if self.your_phone != '' and self.twilio_phone != '' and self.account_sid != '' and self.auth_token != '':\n client = Client(self.account_sid, self.auth_token)\n\n try:\n sms = client.messages.create(\n body=\"\"\"Last will: It was at least 30 days since your last check in. \n This is a reminder to check in in the next 24 hours.\"\"\",\n from_=self.twilio_phone,\n to=self.your_phone)\n sms\n print(\"\\nSMS sent\")\n except Exception as e:\n print(f\"An error occurred while trying to send the SMS. Error: {e}\")\n\n else:\n print(\"\\nMissing SMS parameters. SMS not sent\")\n\n # Check whether all the necessary parameters for email are present\n if self.sender_name != '' and self.recipient_email != '' and self.email != '' and self.email_pwd != '':\n message = f\"\"\"It has been at least 30 days since you last checked in. \nYou need to check in in the next 24 hours.\\n\nOtherwise at {self.deadline} the email with the important info will be sent to the designated recipient.\\n\nIn order to reset simply go to the working directory and run python3 last_will.py\"\"\"\n\n # send_email will return 0 if everything went ok, otherwise it will return an error message\n status = send_email(self.sender_name, self.your_email,\n self.email, self.email_pwd,\n subject='Last will: Reminder to check in', unencrypted_message=message)\n\n if status != 0:\n print(status)\n exit(1)\n else:\n print(\"Email sent\\n\")\n\n print(f\"You have until {self.deadline} to check in. \"\n f\"In order to do that simply go to the working directory and run ./last_will.sh\\n\")\n else:\n print(\"Missing email parameters. Email not sent.\\n\")\n exit(1)", "def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):\n\n creator_name = user_services.get_username(creator_id)\n email_subject = ('%s has published a new exploration!' % creator_name)\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n '%s has published a new exploration! 
You can play it here: '\n '<a href=\"https://www.oppia.org/explore/%s\">%s</a><br>'\n '<br>'\n 'Thanks, and happy learning!<br>'\n '<br>'\n 'Best wishes,<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:\n log_new_error('This app cannot send subscription emails to users.')\n return\n\n recipient_list = subscription_services.get_all_subscribers_of_creator(\n creator_id)\n recipients_usernames = user_services.get_usernames(recipient_list)\n recipients_preferences = user_services.get_users_email_preferences(\n recipient_list)\n for index, username in enumerate(recipients_usernames):\n if recipients_preferences[index].can_receive_subscription_email:\n email_body = email_body_template % (\n username, creator_name, exploration_id,\n exploration_title, EMAIL_FOOTER.value)\n _send_email(\n recipient_list[index], feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def render(self, test_mode=False, html_only=False):\n context = aq_inner(self.context)\n if not context.auto_enabled:\n return 'N/A'\n\n now = datetime.now()\n wtool = getToolByName(context, 'portal_workflow')\n soup = getSoup(self.context, config.SUBSCRIBERS_SOUP_ID)\n # strftime accepts any text, not only strftime characters\n subject = now.strftime(context.auto_subject.encode('utf-8'))\n url = context.absolute_url() + '/subscription?uuid=%(uuid)s'\n footer_text = context.footer.output.replace('${url}', '$url')\n footer_text = footer_text.replace('$url', url)\n count = 0\n base_text = ''\n if context.auto_text:\n base_text += now.strftime(context.auto_text.output.encode('utf-8')) + '\\n'\n providers = self._providers()\n gid = 'issue-%s' % now.strftime(\"%Y-%m-%d-%H-%M-%S.%f\")\n idx = 0\n while context.check_id(gid): # python script in skins\n idx += 1\n gid = 'issue-%s-%d' % (now.strftime(\"%Y-%m-%d-%H-%M-%S.%f\"), idx)\n # create anonymous issue text to be stored to portal\n text = safe_unicode(base_text)\n auto_text = u''\n provider_names = []\n\n for p in providers:\n auto_text += safe_unicode(p.get_gazette_text(context, None))\n provider_names.append(repr(p))\n\n if not auto_text:\n # There is no automatically geenrated text. 
Discard sending of newsletter.\n return 'Nothing to send'\n\n text = text + auto_text\n # Create PDF version of the newsletter using wkhtml2pdf as archive of the issue\n pdf_raw = self.make_pdf(text, html_only)\n if not pdf_raw:\n logger.warning('Unable to create PDF of automatically issued gazette.')\n if not test_mode:\n # create Gazette object representing this issue\n gid = context.invokeFactory('gazette.GazetteIssue', gid)\n gazette = context[gid]\n # Fill the newly create Gazette object with generated data\n gazette.title = subject\n gazette.text = RichTextValue(text, mimeType='text/html', outputMimeType='text/html')\n gazette.providers = provider_names\n gazette.sent_at = now\n try:\n # ignore if there is no publish option for now\n wtool.doActionFor(gazette, 'publish')\n except:\n pass\n # Attach PDF to gazette but only if it is not HTML only mode\n if pdf_raw and not html_only:\n fid = gazette.invokeFactory('File', gid + '.pdf')\n file_pdf = gazette[fid]\n file_pdf.setTitle(gazette.title)\n file_pdf.setFile(pdf_raw, mimetype='application/pdf')\n file_pdf.processForm()\n\n for s in soup.query(active=True):\n # returns email and fullname taken from memberdata if s.username is set and member exists\n subscriber_info = s.get_info(context)\n footer = footer_text % subscriber_info\n mail_text = \"\"\n if subscriber_info['salutation']:\n mail_text += \"%s<br /><br />\" % subscriber_info['salutation']\n mail_text += \"%s------------<br />%s\" % (text, footer)\n try:\n if utils.send_mail(context, None, subscriber_info['email'], subscriber_info['fullname'], subject, mail_text):\n count += 1\n except (SMTPException, SMTPRecipientsRefused):\n pass\n context.most_recent_issue = gazette\n else:\n if html_only:\n self.request.response.setHeader('Content-Type', 'text/html;charset=utf-8')\n else:\n self.request.response.setHeader('Content-Type', 'application/pdf')\n return pdf_raw\n\n return str(count)", "def send_welcome_email(cls, user):\n\n cls.customise_auth_messages()\n auth_messages = current.auth.messages\n\n # Look up CMS template for welcome email\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessageInvited\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n 
\"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email" ]
[ "0.6600645", "0.6544189", "0.63650763", "0.63307947", "0.6069563", "0.60685426", "0.6014015", "0.5973316", "0.59537005", "0.5941229", "0.58852065", "0.5875624", "0.58755213", "0.58395034", "0.582515", "0.5794151", "0.57777476", "0.5777449", "0.5768729", "0.57670987", "0.5743941", "0.5721738", "0.5721314", "0.5690929", "0.56816775", "0.5668132", "0.56658256", "0.56394345", "0.5638623", "0.56370306", "0.5634705", "0.5621308", "0.5618624", "0.56181216", "0.5602861", "0.5595808", "0.5591508", "0.55883557", "0.5587828", "0.5583868", "0.55823344", "0.5575406", "0.55635947", "0.55612373", "0.5555391", "0.5545472", "0.5543036", "0.5517844", "0.55154485", "0.5512823", "0.55058455", "0.5504276", "0.54755545", "0.5475147", "0.54748625", "0.5472065", "0.54691374", "0.5467081", "0.54665756", "0.5466009", "0.54557824", "0.54543567", "0.5452524", "0.5451938", "0.54403305", "0.5428354", "0.5425823", "0.5425334", "0.5420432", "0.5419861", "0.54143214", "0.5403045", "0.5401512", "0.53994524", "0.53911024", "0.5390561", "0.5370147", "0.53637975", "0.53616935", "0.5361499", "0.5361249", "0.5355305", "0.5350795", "0.53502595", "0.5348162", "0.53455085", "0.5343846", "0.5338569", "0.53243715", "0.53238666", "0.5319309", "0.5317109", "0.5315688", "0.5305963", "0.53032154", "0.52881145", "0.52870494", "0.52833", "0.5277282", "0.5271915" ]
0.79926217
0
Decorator to force workers to complete their profiles. This decorator will redirect workers with incomplete profiles to the detailed profile subscription view.
def documents_required(function=None):
    def _dec(view_func):
        def _view(request, *args, **kwargs):
            _user = request.user
            if _user.is_authenticated() and _user.is_worker() and\
                    (not _user.is_application_form_filled):
                return redirect('/profissional/subscription/', permanent=True)
            else:
                return view_func(request, *args, **kwargs)
        _view.__name__ = view_func.__name__
        _view.__dict__ = view_func.__dict__
        _view.__doc__ = view_func.__doc__
        return _view

    if function is None:
        print("Function is none")
        return _dec
    else:
        print("There is some value for function")
        return _dec(function)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setProfileJobs(self,profile=False):\n self.__profileJobs = profile", "def complete(request, backend, *args, **kwargs):\n return do_complete(request.social_strategy, _do_login, user=None,\n redirect_name='home', *args, **kwargs)", "def profile():\n if g.user:\n return render_template('profile.html', user=g.user)\n return redirect(url_for('login'))", "def link_profiles(request, device=None, profile=None):\n context = {}\n if device is not None:\n resource_type = \"profiles\"\n rel = get_object_or_404(Laptop, pk=device)\n options = ConfigurationProfile.objects.exclude(Q(pending_install__in=[rel]) | Q(installed__in=[rel]))\n # The following message will be displayed if there are no options (doesn't render in the form view)\n context['message'] = \"It seems like there are no more profiles that can be assigned to this device.\"\n else:\n resource_type = \"devices\"\n rel = get_object_or_404(ConfigurationProfile, pk=profile)\n options = Laptop.objects.filter(mdm_enrolled=True, retired=False)\\\n .exclude(Q(pending__in=[rel]) | Q(installed__in=[rel]))\n # The following message will be displayed if there are no options (doesn't render in the form view)\n context['message'] = \"It seems like there are no more eligible devices to assign this profile to.\"\n if request.method == 'POST':\n form = AssignmentForm(request.POST, type=resource_type, options=options)\n if form.is_valid():\n selected = form.cleaned_data.get('options')\n context['NO_FOOT'] = True\n if isinstance(rel, Laptop):\n for option in selected:\n config = ConfigurationProfile.objects.get(pk=option)\n rel.pending.add(config)\n if len(selected) == 1:\n context['message'] = \"1 profile was assigned to %s\" % rel.name\n else:\n context['message'] = \"%s profiles were assigned to %s\" % (len(selected), rel.name)\n elif isinstance(rel, ConfigurationProfile):\n for option in selected:\n device = Laptop.objects.get(name=option)\n rel.pending_install.add(device)\n context['message'] = \"This profile has been assigned to %s new device(s)\" % (len(selected))\n context['title'] = \"Success!\"\n context['EXIT_BTN'] = True\n context['EXIT_URL'] = reverse(\"mdm:list\")\n return render(request, 'default.html', context)\n else:\n context['form'] = form\n else:\n if options.count() == 0:\n context['title'] = \"Hmm...\"\n context['NO_FOOT'] = True\n return render(request, 'default.html', context)\n context['form'] = AssignmentForm(type=resource_type, options=options)\n return render(request, 'form_crispy.html', context)", "def profile_required(func=None, login_url=None):\n if not func:\n return partial(profile_required, login_url=login_url)\n\n @wraps(func)\n @login_required(login_url=login_url)\n def wrapped(request, *args, **kwargs):\n if not request.user.profile:\n return redirect('flicks.users.profile')\n return func(request, *args, **kwargs)\n return wrapped", "def self_profile_view(request):\n context = RequestContext(request)\n context_dict = {}\n user = request.user\n user_profile_object = UserProfile.objects.get(user=user)\n if user_profile_object.is_new:\n return HttpResponseRedirect('/user/')\n\n # Social Profile\n try:\n social_profiles_object = SocialProfile.objects.get(parent=user_profile_object)\n except SocialProfile.DoesNotExist:\n social_profiles_object = None\n\n # Education Profile\n try:\n eduObjs = EducationDetails.objects.filter(parent=user_profile_object)\n except EducationDetails.DoesNotExist:\n eduObjs = None\n\n # Employment Profile\n try:\n empObjs = EmploymentDetails.objects.filter(parent=user_profile_object)\n except 
EmploymentDetails.DoesNotExist:\n empObjs = None\n\n try:\n timingsObj = Timings.objects.get(parent=user)\n except:\n timingsObj = None\n # TODO add personal details after the user model\n # is finalized\n # initialize all to None\n context_dict['full_name'] = None\n context_dict['gender'] = None\n context_dict['date_of_birth'] = None\n context_dict['city'] = None\n context_dict['country'] = None\n context_dict['college'] = None\n context_dict['email'] = None\n context_dict['contact_number'] = None\n context_dict['about'] = None\n context_dict['provider'] = None\n context_dict['picture_url'] = None\n context_dict['profile_url'] = None\n context_dict['edu_list'] = None\n context_dict['emp_list'] = None\n\n gender_options = {'male': \"M\", 'female': 'F'}\n\n name = user.first_name + \" \" + user.last_name\n context_dict['full_name'] = name\n\n gender = user_profile_object.gender\n if gender in gender_options.keys():\n context_dict['gender'] = gender_options[gender]\n\n date_of_birth = user_profile_object.date_of_birth\n if date_of_birth != '':\n context_dict['date_of_birth'] = date_of_birth\n\n city = user_profile_object.city\n if city != '':\n context_dict['city'] = city\n\n country = user_profile_object.country\n if country != '':\n context_dict['country'] = country\n\n email_field = user.email\n if email_field != None:\n context_dict['email'] = email_field\n\n contact = user_profile_object.contact\n if contact != None:\n context_dict['contact_number'] = contact\n\n about = user_profile_object.about\n if about:\n context_dict['about'] = about\n\n college = user_profile_object.college\n if college:\n context_dict['college'] = college\n\n picture_url = user_profile_object.picture\n if picture_url:\n context_dict['picture_url'] = picture_url\n\n provider = None\n\n profile_url = None\n\n if social_profiles_object:\n\n if social_profiles_object.profile_pic_url_linkedin:\n provider = \"LinkedIn\"\n picture_url = social_profiles_object.profile_pic_url_linkedin\n profile_url = social_profiles_object.profile_url_linkedin\n\n # If there is no pic uploaded, render LinkedIn pic\n if (not user_profile_object.picture):\n context_dict['pic_url'] = picture_url\n\n elif social_profiles_object.profile_pic_url_facebook:\n provider = \"Facebook\"\n picture_url = social_profiles_object.profile_pic_url_facebook\n profile_url = social_profiles_object.profile_url_facebook\n\n if provider:\n context_dict['provider'] = provider\n\n if context_dict['picture_url'] is None:\n context_dict['picture_url'] = picture_url\n\n if profile_url != None:\n context_dict['profile_url'] = profile_url\n if eduObjs:\n edu_list = []\n for obj in eduObjs:\n edu_list.append({'inst': obj.institution, 'city': obj.city, 'state': obj.state, 'country': obj.country,\n 'degree': obj.degree,\n 'branch': obj.branch, 'from': obj.from_year, 'to': obj.to_year, 'coun': obj.country})\n\n context_dict['edu_list'] = edu_list\n\n if empObjs:\n emp_list = []\n for obj in empObjs:\n emp_list.append({'org': obj.organization, 'loc': obj.location, 'pos': obj.position,\n 'from': obj.from_year, 'to': obj.to_year})\n\n context_dict['emp_list'] = emp_list\n\n context_dict['mentee_count'] = Request.objects.filter(mentorId=user.id, is_completed=True).count()\n rating_obj = {}\n try:\n rating_obj = Ratings.objects.get(mentor=user)\n average = int(round(rating_obj.average))\n rating_obj.activeStars = 'x' * average\n rating_obj.inactiveStars = 'x' * (5 - average)\n\n except ObjectDoesNotExist:\n rating_obj['count'] = 0\n rating_obj['one'] = 0\n rating_obj['two'] = 
0\n rating_obj['three'] = 0\n rating_obj['four'] = 0\n rating_obj['five'] = 0\n rating_obj['average'] = 0\n\n context_dict['ratings'] = rating_obj\n\n context_dict['ratings'] = rating_obj\n\n # Specify timings too\n if timingsObj is not None:\n context_dict['weekday_l'] = timingsObj.weekday_l\n context_dict['weekday_u'] = timingsObj.weekday_u\n context_dict['weekend_l'] = timingsObj.weekend_l\n context_dict['weekend_u'] = timingsObj.weekend_u\n\n return render_to_response(\"mentor/profile-view.html\", context_dict, context)", "async def profile(self, ctx:utils.Context):\n\n pass", "def download_profile(self, request, user_id=None):\n current_url = '%s?%s' % (\n reverse(request.resolver_match.url_name, kwargs={'user_id': user_id}),\n urlencode(request.query_params)\n )\n login_url = '/signin?next=%s' % quote_plus(current_url)\n if not request.user.is_authenticated():\n return redirect(login_url)\n\n user = get_object_or_404(self.get_queryset(), pk=user_id)\n\n try:\n self.check_object_permissions(request, user)\n except NotAuthenticated:\n return redirect(login_url)\n except PermissionDenied:\n return HttpResponse(\"You do not have permission to access this estimate\")\n\n ctx = {\n 'user': user,\n 'profile': user.profile,\n 'work': user.work_set.all(),\n 'education': user.education_set.all()\n }\n\n rendered_html = render_to_string(\"tunga/pdf/profile.html\", context=ctx).encode(encoding=\"UTF-8\")\n\n if request.accepted_renderer.format == 'html':\n return HttpResponse(rendered_html)\n\n pdf_file = HTML(string=rendered_html, encoding='utf-8').write_pdf()\n http_response = HttpResponse(pdf_file, content_type='application/pdf')\n http_response['Content-Disposition'] = 'filename=\"developer_profile.pdf\"'\n return http_response", "def socialauth_complete(request, backend, *args, **kwargs): \n if request.user.is_authenticated():\n return sviews.associate_complete(request, backend, *args, **kwargs)\n else:\n return complete_process(request, backend, *args, **kwargs)", "def complete(request, backend):\n return complete_process(request, backend)", "def has_profile(view):\n\n @wraps(view)\n @logged_in\n def fn(request, secret, trader_id, *args, **kargs):\n userinfo = db.get_userinfo(trader_id, get_language())\n if not userinfo:\n return login(request, method='GET')\n elif not userinfo['has_profile']:\n db.delete_loginkey(trader_id)\n return report_no_profile(request)\n elif (userinfo['banned_until_ts'] > datetime.datetime.now(pytz.utc)\n or userinfo['accumulated_transaction_cost'] > settings.CMBARTER_TRX_COST_QUOTA):\n return HttpResponseForbidden()\n else:\n if not hasattr(request, '_cmbarter_trx_cost'):\n request._cmbarter_trx_cost = 0.0\n try:\n # The next call may affect request._cmbarter_trx_cost\n response = view(request, secret, userinfo, *args, **kargs)\n except Http404:\n report_transaction_cost(db, trader_id, request._cmbarter_trx_cost)\n request._cmbarter_trx_cost = 0.0\n raise\n else:\n report_transaction_cost(db, trader_id, request._cmbarter_trx_cost)\n request._cmbarter_trx_cost = 0.0\n return response\n\n return fn", "def profiles(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n return list(filter(lambda x: x is not None, executor.map(self.profile_details, self.profiles_names())))", "def profile_access(f):\n def wrapper(request, slug, **kwargs):\n try:\n profile = models.AttendeeProfile.objects\\\n .select_related('user')\\\n .get(slug=slug)\n except models.AttendeeProfile.DoesNotExist:\n raise http.Http404()\n\n if request.user.is_staff or 
request.user == profile.user:\n full_access = True\n else:\n full_access = False\n # se il profilo appartiene ad uno speaker con dei talk \"accepted\" è\n # visibile qualunque cosa dica il profilo stesso\n accepted = models.TalkSpeaker.objects\\\n .filter(speaker__user=profile.user)\\\n .filter(talk__status='accepted')\\\n .count()\n if not accepted:\n # Se la votazione comunitaria à aperta e il profilo appartiene\n # ad uno speaker con dei talk in gara la pagina è visibile\n conf = models.Conference.objects.current()\n if not (settings.VOTING_OPENED(conf, request.user) and settings.VOTING_ALLOWED(request.user)):\n if profile.visibility == 'x':\n return http.HttpResponseForbidden()\n elif profile.visibility == 'm' and request.user.is_anonymous():\n return http.HttpResponseForbidden()\n return f(request, slug, profile=profile, full_access=full_access, **kwargs)\n return wrapper", "def profiles(self, profiles):\n\n self._profiles = profiles", "def get(self):\n\n try:\n health.UnlinkProfile()\n except gdata.service.RevokingOAuthTokenFailed:\n pass\n\n self.redirect('/')", "def __call__(self,request):\n if not request.user.is_anonymous:\n if not request.user.is_staff:\n profile = request.user.profile\n if not profile.picture or not profile.biography:\n if request.path not in [reverse('users:update'), reverse('users:logout')]:\n return redirect('users:update')\n response = self.get_response(request)\n return response", "def profiles():\n \n if 'username' in session:\n profiles = mongo.db.profiles.find()\n return render_template('pages/profiles.html', title='Profiles', profiles = profiles)\n flash('Please login to view user profiles.', 'warning')\n return redirect(url_for('login'))", "def profile(request):\r\n user = request.user\r\n projects = [(sub.project, sub.group) for sub in user.subscribeduser_set.all()]\r\n if request.method == 'POST':\r\n shortname = request.POST['shortname']\r\n project = Project.objects.get(shortname = shortname)\r\n sub = SubscribedUser.objects.get(project = project, user = user)\r\n if sub.group == 'Owner':\r\n raise Exception('Cannot delete owner')\r\n else:\r\n sub.delete()\r\n elif request.method == 'GET':\r\n pass\r\n payload = {'projects':projects}\r\n return render(request, 'registration/profile.html', payload)", "def test_private_rooms_have_profiles_requested(self) -> None:\n\n async def get_remote_profile(\n user_id: str, ignore_backoff: bool = True\n ) -> JsonDict:\n if user_id == \"@bruce:remote\":\n return {\n \"displayname\": \"Sir Bruce Bruceson\",\n \"avatar_url\": \"mxc://remote/789\",\n }\n else:\n raise ValueError(f\"unable to fetch {user_id}\")\n\n with patch.object(self.profile_handler, \"get_profile\", get_remote_profile):\n # Continue from the earlier test...\n self.test_private_rooms_do_not_have_profiles_collected()\n\n # Advance by a minute\n self.reactor.advance(61.0)\n\n profiles = self.get_success(\n self.user_dir_helper.get_profiles_in_user_directory()\n )\n self.assertEqual(\n profiles.get(\"@bruce:remote\"),\n ProfileInfo(\n display_name=\"Sir Bruce Bruceson\", avatar_url=\"mxc://remote/789\"\n ),\n )", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('public.home', next=request.url))", "def profile(length, profile_dir):\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],\n profile_dir=profile_dir)\n app.run()", "def is_profile_complete(self):\n return self.height is not None and self.target_weight is not None and self.sex is not 
None", "def training_report(request, application_slug, attach=True):\n try:\n application = CredentialApplication.objects.get(slug=application_slug)\n except ObjectDoesNotExist:\n raise Http404()\n\n if request.user == application.user or request.user.is_admin:\n try:\n return utility.serve_file(application.training_completion_report.path,\n attach=attach)\n except FileNotFoundError:\n raise Http404()\n\n raise PermissionDenied()", "def teacher_progress():\n for teacher in Users.query.filter_by(is_teacher=True).all(): # type: Users\n body = teacher.render_progress_report()\n\n if body is None:\n # teacher had no students\n continue\n\n mg_send([teacher.parent_email], \"Code Challenge Student Progress\", body)\n return \"OK\", 200", "def _release_profile(self, index):\n with self._profile_lock:\n self._available_profiles[index] = True", "def profile():\n \n return render_template(\"profile.html\")", "def call(self, failed=[], deletion=False):\n\n if deletion == False:\n # NOTE we won't it to run only after the delete has completed\n return\n\n log.debug(\"Reading profiles from progran\")\n onos = ProgranHelpers.get_progran_onos_info(self.model_accessor)\n profile_url = \"http://%s:%s/onos/progran/profile/\" % (onos['url'], onos['port'])\n r = requests.get(profile_url, auth=HTTPBasicAuth(onos['username'], onos['password']))\n res = r.json()['ProfileArray']\n\n\n # remove default profiles\n res = [p for p in res if \"Default\" not in p['Name']]\n pnames = [p['Name'] for p in res]\n log.debug(\"Received Profiles: \", profiles=pnames)\n\n field_mapping = {\n 'Name': 'name',\n 'Start': 'start',\n 'End': 'end',\n }\n\n field_transformations = {\n 'Start': ProgranHelpers.date_to_time,\n 'End': ProgranHelpers.date_to_time\n }\n\n handover_mapping = {\n 'A5Hysteresis': 'HysteresisA5',\n 'A3Hysteresis': 'HysteresisA3'\n }\n\n updated_profiles = []\n\n for p in res:\n\n\n # checking for profiles\n try:\n si = ProgranServiceInstance.objects.get(name=p['Name'])\n log.debug(\"Profile %s already exists, updating it\" % p['Name'])\n\n except IndexError:\n si = ProgranServiceInstance()\n\n si.created_by = \"Progran\"\n\n log.debug(\"Profile %s is new, creating it\" % p['Name'])\n\n if not si.is_new:\n # update IMSI association\n xos_imsis_for_profile = [i.subscriber_service_instance.leaf_model for i in si.provided_links.all()]\n progran_imsis_for_profile = p['IMSIRuleArray']\n\n log.debug(\"List of imsis for profile %s in XOS\" % p[\"Name\"], imsis=xos_imsis_for_profile)\n log.debug(\"List of imsis for profile %s in ONOS\" % p[\"Name\"], imsis=progran_imsis_for_profile)\n\n for i in xos_imsis_for_profile:\n if not i.imsi_number in progran_imsis_for_profile:\n log.debug(\"Removing Imsi %s from profile %s\" % (i.imsi_number, p['Name']))\n\n imsi_link = ServiceInstanceLink.objects.get(subscriber_service_instance_id=i.id)\n\n # NOTE: this model has already been removed from the backend, no need to synchronize\n imsi_link.backend_need_delete = False\n imsi_link.no_sync = True\n imsi_link.save() # we need to save it to avoid a synchronization loop\n\n imsi_link.delete()\n else:\n # remove from imsi list coming from progran everything we already know about\n progran_imsis_for_profile.remove(i.imsi_number)\n\n for i in progran_imsis_for_profile:\n log.debug(\"Adding Imsi %s to profile %s\" % (i, p['Name']))\n imsi = MCordSubscriberInstance.objects.get(imsi_number=i)\n imsi_to_profile = ServiceInstanceLink(provider_service_instance=si,\n subscriber_service_instance=imsi)\n imsi_to_profile.save()\n\n # if the model 
has not been synchronized yet, skip it\n if not si.is_new and si.no_sync is False:\n log.debug(\"Skipping profile %s as not synchronized\" % p['Name'])\n # NOTE add it to the removed profiles to avoid deletion (this is ugly, I know)\n updated_profiles.append(si.name)\n continue\n\n # ugly fix\n if 'AdmControl' in p.keys():\n p['AdmControl'] = str(p['AdmControl'])\n\n si = ProgranHelpers.update_fields(si, p, field_mapping, field_transformations)\n\n # checking for handovers\n handover_dict = p['Handover']\n handover_dict = ProgranHelpers.convert_keys(handover_dict, handover_mapping)\n del p['Handover']\n\n if si.handover_id:\n handover = si.handover\n log.debug(\"handover already exists, updating it\", handover=handover_dict)\n else:\n handover = Handover()\n handover = ProgranHelpers.update_fields(handover, handover_dict)\n log.debug(\"handover is new, creating it\", handover=handover_dict)\n handover.created_by = \"Progran\"\n\n handover = ProgranHelpers.update_fields(handover, handover_dict)\n handover.save()\n\n # Assigning handover to profile\n si.handover = handover\n\n # si.backend_status = \"OK\"\n # si.backend_code = 1\n\n si.no_sync = True\n si.previously_sync = True\n\n if p[\"MMECfg\"]:\n si.mmeip = str(p[\"MMECfg\"][\"IPAddr\"])\n si.mmeport = str(p[\"MMECfg\"][\"Port\"])\n\n si.enacted = time.mktime(datetime.datetime.now().timetuple())\n\n si.save()\n\n updated_profiles.append(si.name)\n\n existing_profiles = [p.name for p in ProgranServiceInstance.objects.all() if not p.is_new]\n deleted_profiles = ProgranHelpers.list_diff(existing_profiles, updated_profiles)\n\n if len(deleted_profiles) > 0:\n for p in deleted_profiles:\n si = ProgranServiceInstance.objects.get(name=p)\n if si.created_by == 'XOS' and si.previously_sync == False:\n # don't delete if the profile has been created by XOS and it hasn't been sync'ed yet\n continue\n # TODO delete also the associated Handover\n log.debug(\"Profiles %s have been removed in progran, removing it from XOS\" % str(p))\n si.delete()", "def dispatch(self, request, *args, **kwargs):\n profile = self.request.user.customerprofile\n\n if not self.security_test(profile):\n return HttpResponseRedirect(redirect_to=self.get_fail_security_test_url(profile))\n\n return super().dispatch(request, *args, **kwargs)", "def is_profile_complete(self):\n return bool(self.fullname and self.username and self.email)", "def profile(request):\n # Load last 5 orders as preview\n orders = Order._default_manager.filter(user=request.user)[0:5]\n return render(request, 'oscar/customer/profile.html', locals())", "def user_profile():\n if CURR_USER_KEY in session:\n return render_template('/profile/detail.html')\n else:\n return redirect('/login')", "def run_experiment(self):\n self.sign_up()\n self.participate()\n if self.sign_off():\n self.complete_experiment(\"worker_complete\")\n else:\n self.complete_experiment(\"worker_failed\")", "def complete_experiment(self, status):\n url = self.driver.current_url\n p = urllib.parse.urlparse(url)\n complete_url = \"%s://%s/%s?participant_id=%s\"\n complete_url = complete_url % (p.scheme, p.netloc, status, self.participant_id)\n self.driver.get(complete_url)\n logger.info(\"Forced call to %s: %s\" % (status, complete_url))", "def profile(request):\n profile = get_object_or_404(Profile, user=request.user)\n albums = profile.albums.all()\n plc_albums = albums.exclude(is_public=False)\n pvt_albums = albums.exclude(is_public=True)\n sent_f_requests = FriendRequest.objects.filter(\n from_user=profile.user\n )\n rec_f_requests = 
FriendRequest.objects.filter(\n to_user=profile.user\n )\n\n if request.method == 'POST':\n form = ProfileModelForm(\n request.POST or None,\n request.FILES or None,\n instance=profile\n )\n\n if form.is_valid():\n form.save()\n messages.success(request, \"Profile updated successfully\")\n\n form = ProfileModelForm(instance=profile)\n\n template = 'profiles/profile.html'\n context = {\n 'profile': profile,\n 'form': form,\n 'albums': albums,\n 'plc_albums': plc_albums,\n 'pvt_albums': pvt_albums,\n 'sent_req': sent_f_requests,\n 'rec_req': rec_f_requests,\n }\n return render(request, template, context)", "def teacher_restricted(function):\n @wraps(function)\n def decorated(*args, **kwargs):\n if is_setup():\n user = get_current_user()\n if user is not None and user.type == User.TEACHER:\n return function(*args, **kwargs)\n return redirect('/')\n return decorated", "def test_profile_requests_are_retried(self) -> None:\n has_failed_once = False\n\n async def get_remote_profile(\n user_id: str, ignore_backoff: bool = True\n ) -> JsonDict:\n nonlocal has_failed_once\n if user_id == \"@bruce:remote\":\n if not has_failed_once:\n has_failed_once = True\n raise SynapseError(502, \"temporary network problem\")\n\n return {\n \"displayname\": \"Sir Bruce Bruceson\",\n \"avatar_url\": \"mxc://remote/789\",\n }\n else:\n raise ValueError(f\"unable to fetch {user_id}\")\n\n with patch.object(self.profile_handler, \"get_profile\", get_remote_profile):\n # Continue from the earlier test...\n self.test_private_rooms_do_not_have_profiles_collected()\n\n # Advance by a minute\n self.reactor.advance(61.0)\n\n # The request has already failed once\n self.assertTrue(has_failed_once)\n\n # The profile has yet to be updated.\n profiles = self.get_success(\n self.user_dir_helper.get_profiles_in_user_directory()\n )\n self.assertNotIn(\n \"@bruce:remote\",\n profiles,\n )\n\n # Advance by five minutes, after the backoff has finished\n self.reactor.advance(301.0)\n\n # The profile should have been updated now\n profiles = self.get_success(\n self.user_dir_helper.get_profiles_in_user_directory()\n )\n self.assertEqual(\n profiles.get(\"@bruce:remote\"),\n ProfileInfo(\n display_name=\"Sir Bruce Bruceson\", avatar_url=\"mxc://remote/789\"\n ),\n )", "def login_prof(func):\n @wraps(func, assigned=available_attrs(func))\n def wrapper(request, *args, **kwargs):\n try:\n request.user.prof\n except ObjectDoesNotExist:\n return redirect('gradapp:dashboard_student')\n res = func(request, *args, **kwargs)\n return res\n return wrapper", "def profile_view(self, view):\n def profiled(request, *args, **kwargs):\n t1 = time.clock()\n response = view(request, *args, **kwargs)\n t2 = time.clock()\n log = lambda *args: logger.debug(u\"\".join(map(unicode, args)))\n log(\"profiled view:\\t\\t\", view.__name__)\n log(\"url:\\t\\t\\t\", request.get_full_path())\n log(\"subdomain:\\t\\t\", request.subdomain)\n log(\"get:\\t\\t\\t\", u\"\\n\\t\\t\\t\".join(\n u\"{0} => {1}\".format(k, request.GET.getlist(k))\n for k in request.GET))\n log(\"post:\\t\\t\\t\", u\"\\n\\t\\t\\t\".join(\n u\"{0} => {1}\".format(k, request.POST.getlist(k))\n for k in request.POST))\n log(\"arguments:\\t\\t\", args)\n log(\"named arguments:\\t\", kwargs)\n log(\"execution time:\\t\\t\", t2 - t1)\n log(\"query number:\\t\\t\", len(connection.queries))\n return response\n return wraps(view)(profiled)", "def job_profile(request, job_id):\n\n job = get_object_or_404(Jobs, pk=job_id)\n recruiter = RecruiterProfile.objects.filter(user=job.author).first()\n\n 
template = 'jobs/job_profile.html'\n context = {\n 'title': 'Job profile',\n 'job': job,\n 'recruiter': recruiter,\n }\n\n return render(request, template, context)", "def show_profiles(profiles, height=None, fname=None, **kwargs):\n _show_profiles(profiles, height, fname, **kwargs)", "def login_success(request):\n if not hasattr(request.user, 'profile'):\n return redirect('index')\n else:\n return redirect('registration_process')", "def use_profile(self, profile_name, workdir=None):\n error_messages = []\n\n # Clear all the current configuration before loading a new profile\n self._w3af_core.plugins.zero_enabled_plugins()\n MiscSettings().set_default_values()\n self._w3af_core.uri_opener.settings.set_default_values()\n\n if profile_name is None:\n # If the profile name is None, I just clear the enabled plugins and\n # return\n return\n\n # This might raise an exception (which we don't want to handle) when\n # the profile does not exist\n profile_inst = profile(profile_name, workdir)\n \n # It exists, work with it!\n\n # Set the target settings of the profile to the core\n self._w3af_core.target.set_options(profile_inst.get_target())\n\n # Set the misc and http settings\n try:\n profile_misc_settings = profile_inst.get_misc_settings()\n except BaseFrameworkException, e:\n msg = ('Setting the framework misc-settings raised an exception'\n ' due to unknown or invalid configuration parameters. %s')\n error_messages.append(msg % e)\n else:\n #\n # IGNORE the following parameters from the profile:\n # - misc_settings.local_ip_address\n #\n if 'local_ip_address' in profile_inst.get_misc_settings():\n local_ip = get_local_ip()\n profile_misc_settings['local_ip_address'].set_value(local_ip)\n\n misc_settings = MiscSettings()\n misc_settings.set_options(profile_misc_settings)\n\n try:\n http_settings = profile_inst.get_http_settings()\n except BaseFrameworkException, e:\n msg = ('Setting the framework http-settings raised an exception'\n ' due to unknown or invalid configuration parameters. %s')\n error_messages.append(msg % e)\n else:\n self._w3af_core.uri_opener.settings.set_options(http_settings)\n\n #\n # Handle plugin options\n #\n error_fmt = ('The profile you are trying to load (%s) seems to be'\n ' outdated, this is a common issue which happens when the'\n ' framework is updated and one of its plugins adds/removes'\n ' one of the configuration parameters referenced by a'\n ' profile, or the plugin is removed all together.\\n\\n'\n\n 'The profile was loaded but some of your settings might'\n ' have been lost. This is the list of issues that were'\n ' found:\\n\\n'\n ' - %s\\n'\n\n '\\nWe recommend you review the specific plugin'\n ' configurations, apply the required changes and save'\n ' the profile in order to update it and avoid this'\n ' message. 
If this warning does not disappear you can'\n ' manually edit the profile file to fix it.')\n\n core_set_plugins = self._w3af_core.plugins.set_plugins\n\n for plugin_type in self._w3af_core.plugins.get_plugin_types():\n plugin_names = profile_inst.get_enabled_plugins(plugin_type)\n\n # Handle errors that might have been triggered from a possibly\n # invalid profile\n try:\n unknown_plugins = core_set_plugins(plugin_names, plugin_type,\n raise_on_error=False)\n except KeyError:\n msg = ('The profile references the \"%s\" plugin type which is'\n ' unknown to the w3af framework.')\n error_messages.append(msg % plugin_type)\n continue\n \n for unknown_plugin in unknown_plugins:\n msg = ('The profile references the \"%s.%s\" plugin which is'\n ' unknown in the current framework version.')\n error_messages.append(msg % (plugin_type, unknown_plugin))\n\n # Now we set the plugin options, which can also trigger errors with\n # \"outdated\" profiles that users could have in their ~/.w3af/\n # directory.\n for plugin_name in set(plugin_names) - set(unknown_plugins):\n\n try:\n plugin_options = profile_inst.get_plugin_options(\n plugin_type,\n plugin_name)\n self._w3af_core.plugins.set_plugin_options(plugin_type,\n plugin_name,\n plugin_options)\n except BaseFrameworkException, w3e:\n msg = ('Setting the options for plugin \"%s.%s\" raised an'\n ' exception due to unknown or invalid configuration'\n ' parameters. %s')\n error_messages.append(msg % (plugin_type, plugin_name, w3e))\n\n if error_messages:\n msg = error_fmt % (profile_name, '\\n - '.join(error_messages))\n raise BaseFrameworkException(msg)", "def user_settings(request):\n return redirect('edit_profile')", "def profile(length=25, profile_dir=None):\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],\n profile_dir=profile_dir)\n app.run()", "def profile(length=25, profile_dir=None):\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],\n profile_dir=profile_dir)\n app.run()", "def profile(length=25, profile_dir=None):\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],\n profile_dir=profile_dir)\n app.run()", "def profile_url(self):\n return reverse(\"auth_profile\", args=[self.user.username])", "def cancelDestination(self):\n return constants.PROFILE", "def cancelDestination(self):\n return constants.PROFILE", "def cancelDestination(self):\n return constants.PROFILE", "def on_remove_profile_action(self):\n profile = self.Manager.profile\n\n # TODO: while it can probably be safely assumed that currentData() and currentIndex() will refer to the same profile as Manager.profile, it is NOT guaranteed; we should either verify that they are indeed the same or search the model for the profile name (as pulled from the manager) to avoid the issue altogether\n if message('warning', 'Confirm Delete Profile',\n 'Delete \"' + profile.name + '\"?',\n 'Choosing \"Yes\" below will remove this profile '\n 'and all saved information within it, including '\n 'customized load-orders, ini-edits, etc. Note '\n 'that installed mods will not be affected. This '\n 'cannot be undone. 
Do you wish to continue?'):\n self.Manager.delete_profile(\n self._selector.currentData())\n self._selector.removeItem(\n self._selector.currentIndex())", "def Adjust_Profile_List( self ):\r\n listing = self.system.Profile_List() #Get the list of current profiles\r\n d=DialogProfileAdjust.DialogProfileAdjustment( self.root, listing, 'Profiles', 'Organize the Profiles' )\r\n if( d.return_state == 0 ):\r\n return #Cancel hit\r\n #Go through d's profile list, and try to add names not seen before\r\n for item in d.profile_list:\r\n self.system.Add_Empty_Profile( item )\r\n #Go through d's remove list, and try to remove names if they existed\r\n for name in d.remove_list:\r\n self.system.Remove_Profile( name )", "def validate_server_profile_task_step(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n not_exists = 0\n verified = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"Validate server profile <%s> task contains <%s>\" % (profile.name, profile.method))\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n\n CommonOperationServerProfile.click_server_profile(profile.name)\n FusionUIBase.select_view_by_name(view_name='Activity', timeout=5, fail_if_false=False)\n CommonOperationServerProfile.click_activity_collapser(profile.task)\n timeout = int(getattr(profile, 'validate_timeout', '5'))\n ret = VerifyServerProfile.verify_activity_contains_text(profile.method, timeout=timeout, fail_if_false=False)\n # Verify method text not exist in steps\n if getattr(profile, 'exist', '').lower() == 'false':\n if ret is True:\n ui_lib.fail_test(\"%s should not exist in task steps\" % profile.method)\n elif ret is False:\n ui_lib.fail_test(\"%s should exist in task steps\" % profile.method)\n\n logger.info(\"Server profile '%s' got the correct task method\" % profile.name)\n verified += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to view! 
all %s server profile(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if verified < total:\n logger.warn(\"not all of task for the server profile(s) is successfully verified - %s out of %s verified \" % (verified, total))\n if verified + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped, %s profile(s) left is failed being verified \" % (not_exists, total - verified - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully verified - %s out of %s \" % (verified, total))\n return True", "def profile_get_details(sender, instance, created, **kwargs):\n if created:\n return get_profile_details.delay(instance.id)", "def _1_profile(self, _1_profile):\n\n self.__1_profile = _1_profile", "def remove_profile(request, profile, device=0):\n context = {}\n config = get_object_or_404(ConfigurationProfile, pk=profile)\n if device == 0:\n # Completely remove Configuration Profile from MDM\n mode = 'delete'\n if config.installed.all().count() == 0:\n config.delete()\n messages.success(request, \"Profile was successfully deleted\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:list\"))\n else:\n context['form'] = ProfileRemovalForm(mode=mode)\n else:\n # Unlink profile from device\n laptop = get_object_or_404(Laptop, pk=device)\n if config in laptop.pending.all():\n laptop.pending.remove(config)\n record = InstallationRecord.objects.filter(profile=config, device=laptop, active=True, version=\"RM\").first()\n # If record exists (profile is already installed) reset to installed status\n if record is not None:\n laptop.installed.add(config)\n with open(config.profile) as profile:\n context = json.load(profile)\n record.version = str(context['data']['version'])\n record.save()\n messages.success(request, \"Removal request cancelled\", extra_tags='success')\n else:\n messages.success(request, \"Profile is no longer assigned to {}\".format(laptop.name),\n extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:list\"))\n elif config in laptop.installed.all():\n mode = 'disassociate'\n context['form'] = ProfileRemovalForm(mode=mode)\n else:\n raise Http404\n\n # If auto-removal option presented, handle form data\n if request.method == 'POST':\n form = ProfileRemovalForm(request.POST, mode=mode)\n if form.is_valid():\n selected = form.cleaned_data['options']\n if selected == 'auto':\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.version = \"RM\"\n record.save()\n laptop.installed.remove(config)\n laptop.pending.add(config)\n else:\n # Cancel all pending assignments first\n for laptop in config.pending_install.all():\n config.pending_install.remove(laptop)\n\n # Prepare MDM to remove profile from device\n for laptop in config.installed.all():\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.version = \"RM\"\n record.save()\n laptop.installed.remove(config)\n laptop.pending.add(config)\n messages.success(request, \"Profiles will be removed automatically at next checkin\",\n extra_tags='success')\n else:\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.expires = 
timezone.now()\n record.active = False\n record.save()\n laptop.installed.remove(config)\n messages.success(request, \"Profile successfully removed from {}\".format(laptop.name),\n extra_tags='success')\n else:\n for laptop in config.installed.all():\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.expires = timezone.now()\n record.active = False\n record.save()\n config.delete()\n messages.success(request, \"Configuration profile deleted successfully\")\n return HttpResponseRedirect(reverse(\"mdm:list\"))\n else:\n context['form'] = form\n return render(request, 'form_crispy.html', context)", "def get_success_url(self):\n\n # get the pk for this quote\n profile_pk = self.kwargs['profile_pk']\n status_pk = self.kwargs['status_pk']\n\n # # reverse to show the person page.\n return reverse('show_profile_page', kwargs={'pk':profile_pk})", "def on_resume(self, userdata):\n pass", "def open_profile_menager():\r\n reload(edc_profile_manager)\r\n edc_profile_manager.ProfileManager()", "def add_profile_details(request):\n\n if request.method == 'POST':\n try:\n profile = Profile.objects.get(user=request.user)\n edit_profile = EditProfileForm(request.POST, instance=profile)\n if edit_profile.is_valid():\n profile.save()\n messages.success(request, 'Your profile has been updated')\n if profile.user_type == 'dismantler':\n return redirect(reverse('addlisting'))\n else:\n return redirect(reverse('listings'))\n except ObjectDoesNotExist:\n profile_form = EditProfileForm(request.POST)\n if profile_form.is_valid():\n profile = Profile.objects.create(\n user=request.user,\n user_type=profile_form.cleaned_data['user_type'],\n business_name=profile_form.cleaned_data['business_name'],\n phone=profile_form.cleaned_data['phone'],\n postcode=profile_form.cleaned_data['postcode'],\n city=profile_form.cleaned_data['city'],\n street_address=profile_form.cleaned_data['street_address'],\n street_address2=profile_form.cleaned_data[\n 'street_address2'],\n county=profile_form.cleaned_data['county'],\n country=profile_form.cleaned_data['country'],\n )\n profile.save()\n messages.success(request, 'Your profile has been saved')\n if profile.user_type == 'dismantler':\n return redirect(reverse('addlisting'))\n else:\n return redirect(reverse('listings'))\n else:\n try:\n profile = Profile.objects.get(user=request.user)\n profile_form = EditProfileForm(instance=profile)\n context = {\n 'profile': profile,\n 'profile_form': profile_form,\n }\n except ObjectDoesNotExist:\n profile_form = EditProfileForm()\n context = {\n 'profile_form': profile_form,\n }\n\n return render(request, 'profile.html', context)", "def check_enable_actions(self):\n\n if self._profile_name is None:\n self.enableProfileActions.emit(\n False, \"Remove Profile\", False)\n elif self._profile_name.lower() == 'default':\n self.enableProfileActions.emit(\n False, \"Cannot Remove Default Profile\", False)\n else:\n self.enableProfileActions.emit(True, \"Remove Profile\", True)", "def profile(request, username):\n if User.objects.filter(username=username).exists():\n profile_user = User.objects.filter(\n username=username)[0]\n if 'paige' in profile_user.username:\n raise Http404\n else:\n raise Http404\n\n page_username = get_rand_username(profile_user)\n\n request_type = request.GET.get('type', '')\n\n subnav_key, subnav_value, page_title = get_active_page(\n 'profile', request_type)\n\n header = possesive(page_username, page_title)\n title = possesive(profile_user.username, page_title)\n\n 
template_values = {\n 'page_title': title,\n 'nav_profile': 'active',\n subnav_key: subnav_value,\n 'header': header,\n 'user': request.user,\n 'profile_user': profile_user,\n 'header-classes': '',\n 'floor_percentile': get_floor_percentile(\n profile_user.profile),\n 'trend': get_day_trend(profile_user.profile),\n 'num_commprods': cpm.CommProd.objects.filter(\n user_profile=profile_user.profile).count(),\n 'num_votes': cpm.Rating.objects.filter(\n user_profile=profile_user.profile).count()\n }\n\n if request_type != '':\n return profile_search(request,\n template_values, profile_user)\n template_values.update(profile_query_manager(\n request.user, profile_user))\n return render_to_response('profile.html',\n template_values, request)", "def add_profile():\n \n form=ProfileForm() \n if 'username' in session:\n user = mongo.db.user.find_one({'username': session['username']})\n pro = mongo.db.profiles.find_one({'user_id': user['_id']})\n if pro:\n flash('Sorry, only one profile per user permitted. You can update your profile on your dashboard under the profile tab.', 'info')\n return redirect(url_for('dashboard'))\n \n if request.method == 'POST':\n if form.validate_on_submit():\n \n mongo.db.profiles.insert_one({'user_id': user['_id'],\n 'headline': form.headline.data,\n 'bio': form.bio.data,\n 'username': session['username'],\n 'date': datetime.utcnow(),\n 'xp': form.xp.data,\n 'interests': form.interests.data,\n 'stack': form.stack.data,\n 'languages': form.languages.data,\n 'frameworks': form.frameworks.data,\n 'github': form.github.data,\n 'linkedin': form.linkedin.data\n })\n flash('Your profile has been created.', 'success')\n return redirect('profiles')\n \n return render_template('pages/addprofile.html', title='Post',\n form=form, legend='Create your profile')\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))", "def transform_profiles(self):\n\n if len(self.pardus_profiles) > 0: # Make sure we have some profiles to migrate\n for profile in self.pardus_profiles:\n network_manager_profile = NetworkManagerProfile(profile.get_profile_name, profile)\n self.network_manager_profiles.append(network_manager_profile)", "def profile_detail(request, pk):\n profile = request.user.userprofile\n user_relationships = profile.get_relationships()\n user_request = profile.get_friend_request()\n\n context = {\n # 'user': user,\n 'profile': profile,\n 'user_relationships': user_relationships,\n 'user_request': user_request\n }\n\n return render(request, 'accounts/profile_detail.html', context)", "def cyChangeProfile(self):\n d = database.getCurrentAndMaxProfileId()\n d.addCallback(self.cbChangeProfile)\n return d", "def handle_expired_profiles():\n expired_profiles = InstallationRecord.objects.filter(expires__lte=timezone.now(), active=True)\n for record in expired_profiles:\n device = record.device\n profile = record.profile\n device.installed.remove(profile)\n record.active = False\n record.save()", "def test_redirect_profile(self):\n self.create_user_and_login(\n agreed_to_terms_of_service=True,\n filled_out=False\n )\n\n resp = self.client.get(DASHBOARD_URL)\n self.assertRedirects(resp, PROFILE_URL)", "def profileupdaterequest_discard(request, request_id):\n profileupdate = get_object_or_404(ProfileUpdateRequest, active=True,\n pk=request_id)\n profileupdate.active = False\n profileupdate.save()\n\n messages.success(request,\n 'Profile update request was discarded successfully.')\n return redirect(reverse('all_profileupdaterequests'))", "def 
save_profile(self, data, suffix=''):\n # pylint: disable=unused-argument\n self.display_name = data.get('display_name', self.display_name)\n\n users_included_email = data.get('users_included_email', self.users_included_email)\n try:\n regexp_string = self.regexp_from_users_included_email(users_included_email)\n re.compile(regexp_string)\n except:\n raise JsonHandlerError(400, 'Users to exclude by email is causing an error, please edit.')\n self.users_included_email = users_included_email\n\n self.profile_display_job_title = data.get('profile_display_job_title', self.profile_display_job_title)\n self.profile_display_organisation = data.get('profile_display_organisation', self.profile_display_organisation)\n self.profile_display_work_country = data.get('profile_display_work_country', self.profile_display_work_country)\n self.profile_display_email_button = data.get('profile_display_email_button', self.profile_display_email_button)\n self.profile_display_bio = data.get('profile_display_bio', self.profile_display_bio)\n self.enable_cohorts = data.get('enable_cohorts', self.enable_cohorts)", "def incomplete_user_submissions(request):\n incomplete = Phenotype.objects.filter(phenotypestatus__status='I')\n context = {'incomplete':incomplete }\n return render(request, \"SNP_Feature_View/incomplete_user_submissions.html\", context)", "def people_operation(self, operation, value, profiles=None, query_params=None, timezone_offset=None,\n ignore_alias=False, backup=False, backup_file=None):\n assert self.token, \"Project token required for People operation!\"\n if profiles is not None and query_params is not None:\n Mixpanel.LOGGER.warning(\"profiles and query_params both provided, please use one or the other\")\n return\n\n if profiles is not None:\n profiles_list = Mixpanel._list_from_argument(profiles)\n elif query_params is not None:\n profiles_list = self.query_engage(query_params, timezone_offset=timezone_offset)\n else:\n # If both profiles and query_params are None just fetch all profiles\n profiles_list = self.query_engage()\n\n if backup:\n if backup_file is None:\n backup_file = \"backup_\" + str(int(time.time())) + \".json\"\n self.export_data(profiles_list, backup_file, append_mode=True)\n\n # Set the dynamic flag to True if value is a function\n dynamic = isfunction(value)\n\n self._dispatch_batches(self.IMPORT_API, 'engage', profiles_list,\n [{}, self.token, operation, value, ignore_alias, dynamic])\n\n profile_count = len(profiles_list)\n Mixpanel.LOGGER.debug(operation + ' operation applied to ' + str(profile_count) + ' profiles')\n return profile_count", "def setprofile(self, w_func):\n if self.space.is_w(w_func, self.space.w_None):\n self.profilefunc = None\n self.w_profilefuncarg = None\n else:\n self.setllprofile(app_profile_call, w_func)", "def public_list(request, username, list):\n if request.user.is_authenticated:\n return HttpResponseRedirect('/person/%s/%s/' % (username, list))\n else:\n person = get_object_or_404(UserProfile, username=username).user\n args = fetch_profile(request, person)\n\n if list == 'listings':\n listings = []\n for copy in person.own_books.all().order_by('-add_time'):\n item = {}\n item['book'] = copy.book\n item['courses'] = [course for course in copy.book.courses.all()]\n item['copy'] = copy\n listings.append(item)\n args['listings'] = listings\n\n elif list == 'wishlist':\n wish_books = []\n for book in person.wish_books.all():\n item = {}\n item['book'] = book\n item['courses'] = [course for course in book.courses.all()]\n wish_books.append(item)\n\n 
args['wish_books'] = wish_books\n\n return render_to_response('profile/public_profile_%s.html'%list, args, context_instance=RequestContext(request))", "def profile_devices(request, pk):\n context = {}\n profile = get_object_or_404(ConfigurationProfile, pk=pk)\n to_remove = InstallationRecord.objects.filter(profile=profile, device__pending__in=[profile], active=True,\n version=\"RM\")\n pending = Laptop.objects.filter(pending__in=[profile]).exclude(install_records__in=to_remove)\n installed = InstallationRecord.objects.filter(profile=profile, device__installed__in=[profile], active=True)\\\n .exclude(version=\"RM\")\n pending_removal = []\n for record in to_remove:\n pending_removal.append(record.device)\n context['resource'] = profile\n context['resource_type'] = 'Profile'\n context['pending'] = pending\n context['pending_removal'] = pending_removal\n context['installed'] = installed\n context['today'] = timezone.now()\n context['expiration_warning'] = timezone.now() + timezone.timedelta(days=30)\n return render(request, 'mdm/device_list.html', context)", "def profile(request):\n profile = request.user.profile\n return render(request, 'accounts/profile.html', {'profile': profile})", "def complete_url(self):\n return url_for('complete',id=self.id)", "def list_profiles(request, pk=0):\n context = {'items': [], 'resource_type': 'Profile'}\n handle_expired_profiles()\n if pk == 0:\n context['h2'] = \"Configuration Profiles\"\n context['header_1'] = \"Type\"\n context['header_2'] = \"Last Modified\"\n profiles = ConfigurationProfile.objects.all().reverse()\n for profile in profiles:\n assignment_count = profile.pending_install.count()\n install_count = profile.installed.count()\n data = {'filename': str(profile), 'type': \"macOS\", 'meta': profile, 'assignment_count': assignment_count,\n 'install_count': install_count}\n context['items'].append(data)\n else:\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Profiles for {}\".format(device.name)\n context['header_1'] = \"Version\"\n context['header_2'] = \"Expires\"\n context['device_view'] = True\n context['device_id'] = pk\n profiles = ConfigurationProfile.objects.filter(pending_install__in=[device])\n profiles |= ConfigurationProfile.objects.filter(installed__in=[device])\n for profile in profiles:\n status = 'Not assigned'\n for entry in profile.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in profile.pending_install.all():\n if entry == device:\n status = 'Assigned'\n record = InstallationRecord.objects.filter(profile=profile, device=device, active=True).first()\n expires_soon = False\n if record is not None and record.expires is not None:\n if timezone.now() < record.expires < timezone.now() + timezone.timedelta(days=30):\n expires_soon = True\n data = {'filename': str(profile), 'downloadable': False, 'install_record': record, 'meta': profile,\n 'status': status, 'expires_soon': expires_soon}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def has_erp_profile(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=reverse_lazy('err404')):\n actual_decorator = user_passes_test(\n lambda u: hasattr(u, \"erp_profile\") and hasattr(u, \"profile\"),\n login_url=login_url,\n redirect_field_name=redirect_field_name\n )\n if function:\n return actual_decorator(function)\n return actual_decorator", "def apply_only(self, function, worker, *args, **kwargs):\n pass", "def show_policy_profile(self, profile, **params):\r\n return 
self.get(self.policy_profile_path % (profile), params=params)", "def _create_additional_senders(self) -> Dict[str, uploader_utils.RequestSender]:\n if self._should_profile():\n source_bucket = uploader_utils.get_source_bucket(self._logdir)\n\n self._additional_senders[\"profile\"] = functools.partial(\n profile_uploader.ProfileRequestSender,\n api=self._api,\n upload_limits=self._upload_limits,\n blob_rpc_rate_limiter=self._blob_rpc_rate_limiter,\n blob_storage_bucket=self._blob_storage_bucket,\n blob_storage_folder=self._blob_storage_folder,\n source_bucket=source_bucket,\n tracker=self._tracker,\n logdir=self._logdir,\n )", "def profile(self, r, **attr):\n\n tablename = self.tablename\n get_config = current.s3db.get_config\n\n header = get_config(tablename, \"profile_header\")\n\n # Get the page widgets\n widgets = get_config(tablename, \"profile_widgets\")\n if not widgets and not header:\n # Profile page not configured:\n if r.representation not in (\"dl\", \"aadata\"):\n # Redirect to the Read View\n redirect(r.url(method=\"read\"))\n else:\n # No point redirecting\n r.error(405, current.ERROR.BAD_METHOD)\n\n # Index the widgets by their position in the config\n for index, widget in enumerate(widgets):\n widget[\"index\"] = index\n\n if r.representation == \"dl\":\n # Ajax-update of one datalist\n index = r.get_vars.get(\"update\", None)\n if index:\n try:\n index = int(index)\n except ValueError:\n datalist = \"\"\n else:\n # @ToDo: Check permissions to the Resource & do\n # something different if no permission\n datalist = self._datalist(r, widgets[index], **attr)\n output = {\"item\": datalist}\n\n elif r.representation == \"aadata\":\n # Ajax-update of one datatable\n index = r.get_vars.get(\"update\", None)\n if index:\n try:\n index = int(index)\n except ValueError:\n datalist = \"\"\n else:\n # @ToDo: Check permissions to the Resource & do\n # something different if no permission\n datatable = self._datatable(r, widgets[index], **attr)\n return datatable\n\n else:\n # Default page-load\n\n # Page Title\n title = get_config(tablename, \"profile_title\")\n if not title:\n try:\n title = r.record.name\n except:\n title = current.T(\"Profile Page\")\n elif callable(title):\n title = title(r)\n\n # Page Header\n if not header:\n header = H2(title, _class=\"profile-header\")\n elif callable(header):\n header = header(r)\n\n output = {\"title\": title,\n \"header\": header,\n }\n\n # Update Form, if configured\n update = get_config(tablename, \"profile_update\")\n if update:\n editable = get_config(tablename, \"editable\", True)\n authorised = self._permitted(method=\"update\")\n if authorised and editable:\n show = get_crud_string(tablename, \"title_update\")\n hide = current.T(\"Hide Form\")\n form = self.update(r, **attr)[\"form\"]\n else:\n show = get_crud_string(tablename, \"title_display\")\n hide = current.T(\"Hide Details\")\n form = self.read(r, **attr)[\"item\"]\n\n if update == \"visible\":\n hidden = False\n label = hide\n style_hide, style_show = None, \"display:none\"\n else:\n hidden = True\n label = show\n style_hide, style_show = \"display:none\", None\n\n toggle = A(SPAN(label,\n data = {\"on\": show,\n \"off\": hide,\n },\n ),\n ICON(\"down\", _style=style_show),\n ICON(\"up\", _style=style_hide),\n data = {\"hidden\": hidden},\n _class = \"form-toggle action-lnk\",\n )\n form.update(_style=style_hide)\n output[\"form\"] = DIV(toggle,\n form,\n _class = \"profile-update\",\n )\n else:\n output[\"form\"] = \"\"\n\n # Widgets\n response = current.response\n rows = 
[]\n append = rows.append\n row = None\n cols = get_config(tablename, \"profile_cols\")\n if not cols:\n cols = 2\n row_cols = 0\n for widget in widgets:\n\n # Render the widget\n w_type = widget[\"type\"]\n if w_type == \"comments\":\n w = self._comments(r, widget, **attr)\n elif w_type == \"datalist\":\n w = self._datalist(r, widget, **attr)\n elif w_type == \"datatable\":\n w = self._datatable(r, widget, **attr)\n elif w_type == \"form\":\n w = self._form(r, widget, **attr)\n elif w_type == \"map\":\n w = self._map(r, widget, widgets, **attr)\n elif w_type == \"report\":\n w = self._report(r, widget, **attr)\n elif w_type == \"organizer\":\n w = self._organizer(r, widget, **attr)\n elif w_type == \"custom\":\n w = self._custom(r, widget, **attr)\n else:\n if response.s3.debug:\n raise SyntaxError(\"Unsupported widget type %s\" %\n w_type)\n else:\n # ignore\n continue\n\n if row is None:\n # Start new row\n row = DIV(_class=\"row profile\")\n row_cols = 0\n\n # Append widget to row\n row.append(w)\n colspan = widget.get(\"colspan\", 1)\n row_cols += colspan\n if row_cols == cols:\n # Close this row\n append(row)\n row = None\n\n if row:\n # We have an incomplete row of widgets\n append(row)\n output[\"rows\"] = rows\n\n # Activate this if a project needs it\n #response.view = get_config(tablename, \"profile_view\") or \\\n # self._view(r, \"profile.html\")\n response.view = self._view(r, \"profile.html\")\n\n return output", "def get(self, request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n return super(ProfileCreate, self).get(request, *args, **kwargs)", "def _should_profile(self) -> bool:\n if \"profile\" in self._allowed_plugins:\n if not self._one_shot:\n raise ValueError(\n \"Profile plugin currently only supported for one shot.\"\n )\n logger.info(\"Profile plugin is enalbed.\")\n return True\n return False", "def get_profiles(self):\n # print(self.uir) #checkpoint\n if os.path.isdir(self.uir+\"/profiles\"):\n profiles=os.listdir(self.uir+\"/profiles\")\n # print(profiles) #checkpoint\n for profile in profiles:\n wsadmin=self.uir+\"/profiles/\"+profile+\"/bin/wsadmin.bat\"\n if os.path.isfile(wsadmin): #check for wsadmin.bat.\n self.profiles.append(self.uir+\"/profiles/\"+profile)\n\n else: print(self.uir+' Instance does not have \"profile\" folder in '+self.uir)\n return", "def test_no_profiles(self):\n c = Client()\n response = c.get(reverse('profiles:index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data),0)", "def delete_expired_users(self):\n days_valid = settings.ACCOUNT_ACTIVATION_DAYS\n expired = datetime.now() - timedelta(days=days_valid)\n prof_ids = self.filter(user__date_joined__lt=expired)\n prof_ids = prof_ids.values_list('id', flat=True)\n for chunk in chunked(prof_ids, 1000):\n _delete_registration_profiles_chunk.apply_async(args=[chunk])", "def test_no_redirect_profile(self):\n self.create_user_and_login(\n agreed_to_terms_of_service=True,\n filled_out=False\n )\n\n resp = self.client.get(PROFILE_URL)\n assert resp.status_code == 200", "def resume(self):\n raise NotImplementedError()", "def resume(self):\n raise NotImplementedError()", "def user_detail(request, slug):\n user = request.user\n profile = Profile.objects.get(slug=slug)\n albums = profile.albums.all()\n plc_albums = albums.exclude(is_public=False)\n pvt_albums = albums.exclude(is_public=True)\n\n friends = profile.friends.all()\n family = profile.relations.all()\n user_family = user.profile.relations.all()\n 
user_friends = user.profile.friends.all()\n\n receiver = FriendRequest.objects.filter(from_user=profile.user)\n sender = FriendRequest.objects.filter(to_user=profile.user)\n \n received = []\n sent = []\n for item in receiver:\n received.append(item.id)\n received.append(item.to_user)\n\n for item in sender:\n received.append(item.id)\n sent.append(item.from_user)\n\n template = 'profiles/user_detail.html'\n context = {\n 'profile': profile,\n 'friends': friends,\n 'family': family,\n 'albums': albums,\n 'plc_albums': plc_albums,\n 'pvt_albums': pvt_albums,\n 'received': received,\n 'sent': sent,\n 'user_family': user_family,\n 'user_friends': user_friends,\n }\n return render(request, template, context)", "def gohome(username):\n generate_plan(username)\n generate_mileage_line(username)\n # if username == 'alex':\n # last_date = '2018-04-18'\n # generate_map(username, last_date)\n\n return redirect(url_for('.foo', username=username))", "def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)", "def account_profile(request):\n get_or_creat(request)\n return redirect(\"/\")", "def not_complete(request):\n print(\"not_complete method in tutor_helper.py\")\n if user_auth(request):\n user = User.objects.get(email=request.user.email)\n print(\"\\t\", user)\n current_user = UserInformation.objects.get(user=user)\n if current_user.current_main_set is None:\n return False\n if current_user.completed_sets is not None:\n if current_user.current_main_set not in current_user.completed_sets.all():\n print(\"not complete\")\n print(current_user.current_main_set)\n return True\n else:\n if current_user.completed_sets is None:\n return True\n return False", "def profiles():\n images = get_uploaded_images()\n records = db.session.query(UserProfile).all()\n return render_template('profiles.html', images=images, records =records)", "def view_profile(request, user_id=None):\r\n\r\n requesting_user = request.user\r\n\r\n if user_id:\r\n requested_user = get_object_or_404(User, pk=user_id)\r\n else:\r\n requested_user = requesting_user\r\n\r\n profile = Profile.objects.for_user(requested_user)\r\n\r\n datasets = DataSet.objects.active().filter(creator=requested_user).order_by('-created_at')\r\n projects = Project.objects.active().filter(creator=requested_user).order_by('-created_at')\r\n datarequests = \\\r\n DataRequest.objects.active(). \\\r\n filter(creator=requested_user). \\\r\n exclude(status='C'). 
\\\r\n order_by('-created_at')\r\n\r\n render_to_response_data = {\r\n 'datarequests': datarequests,\r\n 'datasets': datasets,\r\n 'profile': profile,\r\n 'projects': projects\r\n }\r\n\r\n return render_to_response(\r\n 'core/view_profile.html',\r\n render_to_response_data,\r\n context_instance=RequestContext(request))", "def completion() -> None:", "def manage_myprofile(request):\n profile = request.user.get_profile()\n users_image = profile.users_image\n if not profile:\n raise Http404\n if request.method == 'POST':\n profile_form = MyProfileForm(request.POST, instance = profile)\n address_contact_form = AddressForm(request.POST,\n instance = profile.address_contact, prefix = 'contact')\n address_permanent_form = AddressForm(request.POST,\n instance = profile.address_permanent, prefix = 'permanent')\n\n if profile_form.is_valid() and address_contact_form.is_valid() \\\n and address_permanent_form.is_valid():\n address_contact = address_contact_form.save()\n address_permanent = address_permanent_form.save()\n\n profile_form.save(address_contact = address_contact,\n address_permanent = address_permanent)\n messages.success(request,\n _('your profile details saved sucessfully'))\n else:\n profile_form = MyProfileForm(instance = profile)\n address_contact_form = AddressForm(instance = profile.address_contact,\n prefix = 'contact')\n address_permanent_form = AddressForm(instance\n = profile.address_permanent, prefix = 'permanent')\n\n return render(request, 'myprofile.html', {\n 'profile_form': profile_form,\n 'address_contact_form': address_contact_form,\n 'address_permanent_form': address_permanent_form,\n 'users_image': users_image\n },\n )# Create your views here.", "def test_profiles_table_populated(self):\n print('(' + self.test_profiles_table_populated.__name__ + ')',\n self.test_profiles_table_populated.__doc__)\n test_table_populated(self, USERS_PROFILE_TABLE,\n INITIAL_USERS_PROFILE_COUNT)" ]
[ "0.51761717", "0.48839507", "0.486394", "0.48614234", "0.48394087", "0.48157418", "0.4814398", "0.48082665", "0.4771799", "0.4770562", "0.47591376", "0.47403374", "0.4720351", "0.46573168", "0.46533135", "0.45912194", "0.4588692", "0.4584498", "0.45765498", "0.45561683", "0.45490643", "0.45352027", "0.45236737", "0.451534", "0.4514758", "0.45105773", "0.44994414", "0.44841406", "0.44824702", "0.44798812", "0.4478216", "0.44707206", "0.44531965", "0.44287917", "0.44249576", "0.44073284", "0.43962228", "0.43946055", "0.4390782", "0.43810835", "0.43806252", "0.4379491", "0.43779334", "0.43767852", "0.43767852", "0.43767852", "0.43731132", "0.43675512", "0.43675512", "0.43675512", "0.4359292", "0.435133", "0.433181", "0.4329754", "0.4327435", "0.43237472", "0.43225092", "0.4304902", "0.42986462", "0.42936817", "0.42868122", "0.42794538", "0.4275531", "0.42719445", "0.42667556", "0.4264197", "0.42588148", "0.42572355", "0.42536312", "0.4249012", "0.42394504", "0.42380074", "0.42375982", "0.42373282", "0.42348513", "0.4233564", "0.42312774", "0.4224686", "0.42224273", "0.4217722", "0.42165825", "0.42091927", "0.42052266", "0.4201184", "0.4198161", "0.41971987", "0.41964084", "0.41931236", "0.41919622", "0.41918695", "0.41918695", "0.41890314", "0.4188898", "0.41879028", "0.41827995", "0.41804403", "0.41782373", "0.41758317", "0.41755122", "0.4174852", "0.41735944" ]
0.0
-1
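Illustrative note (not part of the dataset): the Django snippets in the row above repeat one pattern whenever a configuration profile is removed or expires — stamp the installation record, deactivate it, then detach the profile from the device. A minimal sketch of that pattern follows; the InstallationRecord fields (expires, active, device, profile) and the device.installed relation are assumed from the row's snippets, not defined here.

# Sketch only; models are assumed to be shaped like the snippets above.
from django.utils import timezone

def expire_record(record):
    """Deactivate an installation record and detach its profile from the device."""
    record.expires = timezone.now()    # stamp the expiry time
    record.active = False              # the record is no longer current
    record.save()
    record.device.installed.remove(record.profile)   # profile no longer installed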
Return the guessed language of POSTed text.
def POST(self, text):\n    lang = guess_language(text)\n    return {'language': lang}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_language(self, text):\n try:\n post_lang = detect(text)\n except:\n post_lang = 'N/A'\n return post_lang", "def guess_language(text): # pragma: no cover\n try:\n from guess_language import guessLanguage\n return Language.fromguessit(guessLanguage(text))\n\n except ImportError:\n log.error('Cannot detect the language of the given text body, missing dependency: guess-language')\n log.error('Please install it from PyPI, by doing eg: pip install guess-language')\n return UNDETERMINED", "def detect_language(self):\n if not self.clean:\n self._text_clean()\n if not self.clean:\n return\n self.payload = \"q={}\".format(self.text)\n resp = requests.request('POST', self.url_language, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.language = json.loads(resp.text)['data']['detections'][0][0]['language']\n except KeyError:\n return", "def detect_language(text: str) -> str:\n # Text can also be a sequence of strings, in which case this method\n # will return a sequence of results for each text.\n return str(translate_client.detect_language(text)[\"language\"])", "def _detect_language(self, text):\n\n ratios = self._calculate_languages_ratios(text)\n\n most_rated_language = max(ratios, key=ratios.get)\n\n return most_rated_language", "def language_name(self, text: str) -> str:\n values = extract(text)\n input_fn = _to_func(([values], []))\n pos: int = next(self._classifier.predict_classes(input_fn=input_fn))\n\n LOGGER.debug(\"Predicted language position %s\", pos)\n return sorted(self.languages)[pos]", "def detect_language(text: str) -> str:\n\n # Check if `text` is '-'; if not, try and identify language, otherwise return '-'\n if text != \"-\":\n\n # Detect the language of `text`, and return the most confident/prevalent language, if `text` contains\n # multiple languages. If language detection fails, return an error string\n try:\n langs = {language.confidence: language.code for language in Detector(text, quiet=True).languages}\n return langs[max(langs.keys())]\n except Exception:\n return f\"[ERROR] {text} {sys.exc_info()}\"\n else:\n return \"-\"", "def detectLanguage(self, text):\n return self._er.jsonRequestAnalytics(\"/api/v1/detectLanguage\", { \"text\": text })", "def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... 
or return 'en'\n else:\n lang = 'en'\n return lang", "def get_user_language() -> str:\n languages = {\n \"arabic\": \"arb\",\n \"chinese\": \"cmn-CN\",\n \"danish\": \"da-DK\",\n \"english\": \"en-GB\",\n \"french\": \"fr-FR\",\n \"german\": \"de-DE\",\n \"portuguese\": \"pl-PT\",\n \"spanish\": \"es-ES\"\n }\n textlang = input(\"What language do you want to hear?\")\n try:\n return languages[textlang.lower()]\n except KeyError as e:\n print(\"Enter a valid language.\")\n sys.exit(1)", "def detect_language(text, languages=LANGUAGES):\n if text_count(text, LANGUAGES[0]['common_words']) > text_count(text, LANGUAGES[1]['common_words']):\n return 'Spanish'\n else:\n return 'German'", "def language(self, target):\n self._check_target(target)\n return target.language or self._default_language", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def get_language(self):\r\n return self.language", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n return article.meta_lang[:2]\r\n return self.config.target_language", "def get_language(self):\n return self.lang", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language", "def detect_language(self, text):\n language_ratios = {}\n words = set([word.lower() for word in nltk.word_tokenize(text) if len(word)>2])\n\n for language in self._get_available_languages():\n stopwords_set = set(stopwords.words(language))\n common_elements = words.intersection(stopwords_set)\n language_ratios[language] = len(common_elements)\n\n return max(language_ratios, key=language_ratios.get)", "def get_language(self) -> str:\n return self.language", "def best_match_language(self):\n if not self.accept_language:\n return None\n return self.accept_language.best_match(\n i18n.get_available_languages())", "def language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language\")", "def get_lang(self):\n return self.langs.lang", "def detect(text):\n try:\n return langdetect.detect(text)\n except LangDetectException:\n return None", "def guess_language(self, path):\n problem, ext = os.path.splitext(os.path.basename(path))\n language = _LANGUAGE_GUESS.get(ext, None)\n\n if language == 'Python':\n python_version = str(sys.version_info[0])\n try:\n python_version = self.cfg.get('defaults', 'python-version')\n except configparser.Error:\n pass\n\n if python_version not in ['2', '3']:\n raise KattisSubmissionError(\n \"Invalid Python version specified in .kattisrc, must be 2 or 3\")\n language = 'Python ' + python_version\n\n if language is None:\n raise KattisSubmissionError(\"Could not guess submission language\")\n\n return language", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def programming_language(self) -> str:\n return self.random.choice(PROGRAMMING_LANGS)", "def text_language(text):\n hebrew = 0\n english = 0\n for char in text:\n if char in \"אבגדהוזחטיכךלמםנסעפףצץקרשת\":\n hebrew += 1\n elif char.lower() in \"abcdefghijklmnopqrstuvwxyz\":\n english += 1\n 
return {True: \"hebrew\", False: \"english\"}[hebrew > english]", "def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def language(self):\r\n return self._get('language', {})", "def guess_language(lang_list=None):\n\tlang_codes = frappe.request.accept_languages.values()\n\tif not lang_codes:\n\t\treturn frappe.local.lang\n\n\tguess = None\n\tif not lang_list:\n\t\tlang_list = get_all_languages() or []\n\n\tfor l in lang_codes:\n\t\tcode = l.strip()\n\t\tif not isinstance(code, text_type):\n\t\t\tcode = text_type(code, 'utf-8')\n\t\tif code in lang_list or code == \"en\":\n\t\t\tguess = code\n\t\t\tbreak\n\n\t\t# check if parent language (pt) is setup, if variant (pt-BR)\n\t\tif \"-\" in code:\n\t\t\tcode = code.split(\"-\")[0]\n\t\t\tif code in lang_list:\n\t\t\t\tguess = code\n\t\t\t\tbreak\n\n\treturn guess or frappe.local.lang", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def process_text(self, text, language):", "def language(self) -> str:\n if self.language_code in CODE_TO_LANGUAGE:\n return CODE_TO_LANGUAGE[self.language_code]\n\n return self.language_code", "def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def get_meta_lang(self):\n # we have a lang attribute in html\n attr = self.parser.getAttribute(self.article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n items = [\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\n ]\n for item in items:\n meta = self.parser.getElementsByTag(self.article.doc, **item)\n if meta:\n attr = self.parser.getAttribute(meta[0], attr='content')\n break\n\n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n\n return None", "def language(self):\n # type: () -> string_types\n return self._language", "def language(self):\n # type: () -> string_types\n return self._language", "def detect_languages(text):\n try:\n return langdetect.detect_langs(text)\n except LangDetectException:\n return None", "def language(self):\n lang = None\n if self.__dict__['TAG:language']:\n lang = self.__dict__['TAG:language']\n return lang", "def language(self):\n portal_state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n return aq_inner(self.context).Language() or portal_state.default_language()", "def language(self):\n if self.consent:\n self.consent.language\n translation.activate(self.consent.language)\n self._language = translation.get_language()\n else:\n self._language = settings.LANGUAGE_CODE\n return self._language", "def language_code(self) -> str:\n return pulumi.get(self, \"language_code\")", "def lang(self):\n return self._lang", "def get_proper_language():\n lang = config['summernote'].get('lang')\n\n if not lang:\n return 
config['lang_matches'].get(get_language(), 'en-US')\n\n return lang", "def Language(self, default=None):\n return self.data.get('language', default)", "def clean_lang(self):\n lang = self.cleaned_data.get('lang', None)\n if not lang in self.greetings:\n raise forms.ValidationError(\n \"We couldn't find the language you selected {}\"\n \" Please select another\".format(lang)\n )\n return lang", "def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"", "def detect(text, format='plain'):\n if format not in FORMATS:\n raise TypeError('The format should be one of %s' % (FORMATS,))\n params = {'text': text, 'format': format}\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/detect', params=params)\n code = r.json['code']\n if code == 200:\n lang = r.json['lang']\n if lang:\n return lang\n raise LanguageNotDetected\n else:\n raise TranslationError(code)", "def get_weather_language(self):\n return self.bot_data_file[\"weather\"][\"default_language\"]", "def get_language_name(self):\n return self.language_name", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n self.language = article.meta_lang[:2]\r\n self.language = self.config.target_language", "def translate(self, language=None):", "def detect_language(text, LANGUAGES):\n lang = None\n word_count = 0\n our_test = []\n \n for language in LANGUAGES:\n \n result = get_word_count(text, language['common_words'])\n print(result)\n #import pdb; pdb.set_trace()\n if result > word_count:\n lang = language['name']\n word_count = result\n \n return lang", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None", "def browserLanguages(request):\n fallback = []\n accepted = request.http_accept_language\n if accepted:\n # Extract the languages names from the string\n accepted = accepted.split(',')\n accepted = map(lambda x: x.split(';')[0], accepted)\n # Add base language for each sub language. 
If the user specified\n # a sub language like \"en-us\", we will try to to provide it or\n # a least the base language \"en\" in this case.\n for lang in accepted:\n lang = lang.lower()\n fallback.append(lang)\n if '-' in lang:\n baselang = lang.split('-')[0]\n fallback.append(baselang)\n return fallback", "def language(self, text_language):\n language = text_language.strip().lower()\n if language in LANGUAGE_TO_CODE:\n self._language_code = LANGUAGE_TO_CODE[language]\n else:\n self._language_code = language[:2]", "def logon_language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logon_language\")", "def get_language(benchmark):\n config = benchmark_config.get_config(benchmark)\n return config.get('language', 'c++')", "def code(self):\n return self.language()", "def identify_lang(\n self,\n text: str,\n with_probs: bool = False,\n ) -> str | Tuple[str, float]:\n if not self._is_valid_text(text):\n result = (\"un\", 1.0)\n else:\n text_ = utils.to_collection(text, str, list)\n result = models.get_topn_preds_and_probs(\n self.model.predict(text_), 1, self.classes\n )[0][0]\n return result[0] if with_probs is False else result", "def get_related_language(self) -> str:\n pass", "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def to_language(self):\n return self.language()", "def is_english(text):\n\n lang = langid.classify(text)\n if lang and 'en' in lang[0]:\n return True\n return False", "def language(self) -> str:\n return self._language", "def language(self) -> str:\n return self._language", "def get_language(mgroups):\n\n if mgroups:\n lang = mgroups[0].strip('[').strip(']')\n return lang.lower().strip()\n return None", "def get_text(name, language_code) -> MLWText:\n if language_code not in data:\n # default language code\n language_code = 'ru'\n retval = data.get(language_code).get(name)\n if retval is None:\n raise ValueError(f'Undefined text name: {name}')\n return retval", "def getMetaLang(self, article):\n # we have a lang attribute in html\n attr = Parser.getAttribute(article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n kwargs = {'tag':'meta',\n 'attr':' http-equiv',\n 'value':'content-language'}\n meta = Parser.getElementsByTag(article.doc, **kwargs)\n if meta:\n attr = Parser.getAttribute(meta[0], attr='content')\n \n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n \n return None", "def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"", "def identifyLangage(script):\n\tlangage = \"undefined\"\n\tscriptNameInArray = script.split(\".\")\n\textension = scriptNameInArray[-1]\n\t\n\tif(extension == \"pl\"):\n\t\tlangage = \"perl\"\n\telif(extension == \"py\"):\n\t\tlangage = \"python\"\n\telif(extension == \"sh\"):\n\t\tlangage = \"bash\"\n\telse:\n\t\tlangage == \"not recognised\"\n\n\treturn langage", "def language_code(self):\n return self._language_code", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if 
language:\n language = language.name\n return language.title()", "def language(self):\n hcell = self._get_hcell2()\n celltype = hcell[\"celltype\"]\n if celltype != \"code\":\n raise AttributeError\n return hcell.get(\"language\", \"python\")", "def recommended_correction(text):\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(text)\n correction = language_check.correct(text, matches)\n return correction", "def audio_language(self):\n # type: () -> string_types\n return self._audio_language", "def get_language_and_script(tracker):\n script = \"latin\"\n language = \"en\"\n for event in reversed(tracker.events):\n if event.get(\"event\") == \"user\":\n parse_data = event['parse_data']\n language = parse_data['language']['name']\n script = parse_data['script']\n break\n return language, script", "def default_language(self):\n return self._default_language", "def get_meta_lang(self, article):\r\n # we have a lang attribute in html\r\n attr = self.parser.getAttribute(article.doc, attr='lang')\r\n if attr is None:\r\n # look up for a Content-Language in meta\r\n items = [\r\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\r\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\r\n ]\r\n for item in items:\r\n meta = self.parser.getElementsByTag(article.doc, **item)\r\n if meta:\r\n attr = self.parser.getAttribute(meta[0], attr='content')\r\n break\r\n\r\n if attr:\r\n value = attr[:2]\r\n if re.search(RE_LANG, value):\r\n return value.lower()\r\n\r\n return None", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def get_project_lang(self):\n return self.project_name_lang.currentText() # .replace(\"é\",\"e\").lower()", "def lang_genoeg(lengte):\n return", "def get_language(fn):\n # FIXME - this expects the fn to be '.../XX/LC_MESSAGES/messages.po'\n return fn.split(os.sep)[-3]", "def get_site_meta_language(\n header,\n body,\n accepted_langs=KNOWN_LANG_TAGS\n):\n if not isinstance(header, str) and not isinstance(header, dict):\n raise ValueError(\n \"unknown header - expected str or dict - got '%s'\" % (type(header))\n )\n\n if isinstance(body, bs4.BeautifulSoup):\n bs = body\n elif isinstance(body, str):\n bs = parse_html(body)\n else:\n raise ValueError(\"unknown body - expected str or bs4.BeautifulSoup - \"\n \"got '%s'\" % (type(body)))\n\n # Prefer language tags in the following order:\n # HTML Start -> HTTP-EQUIV -> HEADER\n\n # HTML Start Tag\n html_lang = extract_html_lang_tag(bs, accepted_langs=accepted_langs)\n if html_lang is not None:\n return html_lang\n\n # HTTP-EQUIV Tag\n http_equiv_lang = extract_http_equiv_lang_tag(\n bs, accepted_langs=accepted_langs\n )\n if http_equiv_lang is not None:\n return http_equiv_lang\n\n # Header\n http_header_lang = extract_http_header_lang_tag(\n header, accepted_langs=accepted_langs\n )\n if http_header_lang is not None:\n return http_header_lang\n\n # No language found\n return None", "def wikiLanguages():\n return languages", "def get_default_language():\n utility = queryUtility(ILanguageAvailability)\n if utility is not None:\n return utility.getDefaultLanguage()\n return DEFAULT_LANGUAGE", "def test_lang_is_missing(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n lang = json_data['lang']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']\n assert lang == 'en'", "def 
default_language(self) -> str:\n return self.raw_config.get(\"default_language\", \"en\")", "def getDefaultLocaleLanguage():\n # Setup textdomain\n try:\n locale.bindtextdomain(TEXT_DOMAIN, DEFAULT_LOCALE_PATH)\n except AttributeError:\n log_func.warning(u'Locale module not support text domain')\n\n language = locale.getlocale()[0]\n\n if sys_func.isWindowsPlatform():\n if language in WINDOWS2UNIX_LANGUAGE:\n language = WINDOWS2UNIX_LANGUAGE.get(language, DEFAULT_LOCALE)\n else:\n try:\n item1, item2 = language.split('_')\n language = '_'.join((item1[:2].lower(), item2[:2].upper()))\n except:\n log_func.fatal(u'Error get language')\n language = DEFAULT_LOCALE\n return language", "def get_discussion_language(matchdict, params, session, current_issue_uid=None):\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,\n Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n\n # first matchdict, then params, then session, afterwards fallback\n issue = matchdict['issue'] if 'issue' in matchdict \\\n else params['issue'] if 'issue' in params \\\n else session['issue'] if 'issue' in session \\\n else current_issue_uid\n\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n\n return db_issue.lang if db_issue else 'en'", "def generation_language(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"generation_language\")", "def search_language(string, allowed_languages=None):\n\n if allowed_languages:\n allowed_languages = set(Language.fromguessit(lang) for lang in allowed_languages)\n\n confidence = 1.0 # for all of them\n\n for prop, language, lang, word in find_possible_languages(string, allowed_languages):\n pos = string.find(word)\n end = pos + len(word)\n\n # only allow those languages that have a 2-letter code, those that\n # don't are too esoteric and probably false matches\n # if language.lang not in lng3_to_lng2:\n # continue\n\n # confidence depends on alpha2, alpha3, english name, ...\n if len(lang) == 2:\n confidence = 0.8\n elif len(lang) == 3:\n confidence = 0.9\n elif prop == 'subtitleLanguage':\n confidence = 0.6 # Subtitle prefix found with language\n else:\n # Note: we could either be really confident that we found a\n # language or assume that full language names are too\n # common words and lower their confidence accordingly\n confidence = 0.3 # going with the low-confidence route here\n\n return Guess({prop: language}, confidence=confidence, input=string, span=(pos, end))\n\n return None" ]
[ "0.84711045", "0.7443068", "0.72882843", "0.7189894", "0.7099894", "0.7063926", "0.69427687", "0.67506385", "0.6746426", "0.6721064", "0.670669", "0.66078335", "0.6576339", "0.6564039", "0.65360004", "0.65185875", "0.6517528", "0.6502088", "0.64020914", "0.63992137", "0.638809", "0.636545", "0.63535464", "0.6345171", "0.63440436", "0.6300778", "0.6292615", "0.6288095", "0.6284557", "0.62177", "0.62065065", "0.6205134", "0.6200681", "0.6187067", "0.61738425", "0.6164025", "0.61572164", "0.61534196", "0.61461824", "0.61213815", "0.61213815", "0.61139077", "0.6099754", "0.6066645", "0.60621613", "0.60311115", "0.602326", "0.6020779", "0.59972745", "0.5970926", "0.5970076", "0.5968754", "0.5962315", "0.59465677", "0.5940854", "0.5932111", "0.5930215", "0.5915078", "0.5915078", "0.5915078", "0.5915078", "0.59109825", "0.5909415", "0.5903796", "0.58822864", "0.58788896", "0.5878079", "0.5855931", "0.5852331", "0.58358383", "0.58244526", "0.5799995", "0.57979697", "0.57979697", "0.579593", "0.5787942", "0.5786909", "0.5779748", "0.5771757", "0.5766443", "0.57542443", "0.5742177", "0.57126904", "0.57098424", "0.56974256", "0.56895715", "0.56890726", "0.56838524", "0.5662316", "0.5655929", "0.5649916", "0.56289583", "0.56121475", "0.5611957", "0.5606814", "0.560539", "0.5597122", "0.5580251", "0.5561005", "0.55546385" ]
0.7704462
1
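Illustrative note (not part of the dataset): the query/document pair above wraps a language guesser in a POST handler but leaves guess_language undefined. A minimal sketch of the same endpoint shape is below, using langdetect — which several of the row's negatives call — as a stand-in for that helper; the class name is assumed.

# Sketch only; langdetect stands in for the undefined guess_language helper.
from langdetect import detect

class LanguageResource:
    def POST(self, text):
        try:
            lang = detect(text)      # e.g. 'en', 'fr'
        except Exception:            # empty or undecidable input
            lang = None
        return {'language': lang}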
Generates a sha1 checksum for a given string
def get_checksum(str):\n    hash_object = hashlib.sha1(b'%s' % str)\n    hex_dig = hash_object.hexdigest()\n    return hex_dig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sha1(s: str) -> str:\n return hashlib.sha1(s.encode()).hexdigest()", "def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def checksum_from_sha1(value):\n # More constrained regex at lexer level\n CHECKSUM_RE = re.compile('SHA1:\\\\s*([\\\\S]+)', re.UNICODE)\n match = CHECKSUM_RE.match(value)\n if match:\n return checksum.Algorithm(identifier='SHA1', value=match.group(1))\n else:\n return None", "def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()", "def get_sha1(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha1_from_stream(i)", "def gen_hash(s: str) -> str:\n\n m = hashlib.md5()\n m.update(bytes(s, encoding = 'utf8'))\n hash_code = str(m.hexdigest())\n\n return hash_code", "def generate_sha1(string, salt=None):\n if not salt:\n salt = sha1(str(random.random())).hexdigest()[:5]\n hash = sha1(salt+str(string)).hexdigest()\n\n return (salt, hash)", "def _get_checksum(self, text):\n # Compute the new checksum over everything but the sha1sum line.\n # This will fail if sha1sum appears for some other reason. It won't ;-)\n text = \"\".join([line for line in text.splitlines(True) if \"sha1sum\" not in line])\n return utils.str_checksum(text)", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s)\n return h.hexdigest()", "def keyhash(string):\n return hashlib.sha1(string.encode('utf-8')).hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode())\n return h.hexdigest()", "def sha1(self) -> str:\n return self.data.sha1", "def digest(string):\n return md5(string.encode(\"utf-8\")).hexdigest()", "def calculate_hash(stuff):\n\tsha1 = hashlib.sha1()\n\tsha1.update(stuff)\n\treturn sha1.hexdigest()", "def sha1(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)\n d.update(data)\n return d.digest()", "def addChecksum(s):\n if len(s) < 1:\n raise ValueError, \"The provided string needs to be atleast 1 byte long\"\n return (_calcChecksum(s) + s)", "def _sha1_hash_json(self, value):\n hash = hashlib.new(\"sha1\")\n binary_value = value.encode(\"ascii\")\n hash.update(binary_value)\n sha1_res = hash.hexdigest()\n return sha1_res", "def computeHash(string):\n\tif isBytes(string):\n\t\tstring = string.decode(\"latin-1\")\n\thash_ = 63689\n\tfor char in string:\n\t\thash_ = hash_ * 378551 + ord(char)\n\treturn hash_ % 65536", "def sha1Function():\r\n\r\n sha1Input = input(\"Enter SHA-1 String: \") # user input for hashing\r\n \r\n sha1Result = hashlib.sha1(sha1Input.encode()) # encoding user input then sending to sha1() function\r\n \r\n print(\"Hashing Successful\")\r\n print(\"The SHA-1 Hashing Result is : \", end =\"\") \r\n print(sha1Result.hexdigest()) # printing the hashing result in hexadecimal value\r\n\r\n menu() # display the menu again\r", "def md5(s1):\n s = str(s1)\n h1 = hashlib.md5()\n h1.update(s.encode(encoding='utf-8'))\n s = h1.hexdigest()\n return s", "def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h", "def hex_sha1_of_bytes(data: bytes) -> Sha1HexDigest:\n return Sha1HexDigest(hashlib.sha1(data).hexdigest())", "def sha1sum(filename):\n if not 
os.path.isfile(filename):\n return ''\n hasher = hashlib.sha1()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def make_packet(self, string):\n\n string = string[:2] + \"checksum:,\" + string[2:]\n chksum = hashlib.sha256(string.encode()).hexdigest()\n string = string[:11] + chksum + string[11:]\n return string", "def sha1sum(filename):\n with open(filename, mode='rb') as f:\n d = hashlib.sha1()\n for buf in iter(functools.partial(f.read, 1024*100), b''):\n d.update(buf)\n return d.hexdigest()", "def checksum(**kwargs):\n\n # remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def do_checksum(source_string):\n sum = 0\n max_count = 3\n count = 0\n while count < max_count:\n val = ord(source_string[count + 1]) * 256 + ord(source_string[count])\n sum = sum + val\n sum = sum & 0xffffffff\n count = count + 2\n if max_count < len(source_string):\n sum = sum + ord(source_string[len(source_string) - 1])\n sum = sum & 0xffffffff\n\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n answer = answer >> 8 | (answer << 8 & 0xff00)\n print(answer)\n return answer", "def hash(self, string):\n h = md5()\n h.update(string)\n return h.digest()", "def hash_str(string):\n\n return hmac.new(secret, string).hexdigest()", "def md5_sum(string):\n m = hashlib.md5()\n m.update(string.encode(\"utf-8\"))\n return m.hexdigest()", "def getChecksum(self, s):\n \n chksum = 0\n for ch in s:\n chksum = chksum + ord(ch)\n \n return hex(chksum%256)[2:]", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def calc_md5(string):\n\treturn md5(string).hexdigest()", "def chord_hash(input_string):\n h = hashlib.sha1() # 160 bit string\n encoded_data = input_string.encode('utf-8')\n h.update(encoded_data)\n hex_string = h.hexdigest()\n hex_value = int(hex_string, 16)\n hash_integer_value = hex_value >> (160 - m)\n return hash_integer_value", "def checksum(value: str) -> str:\n return chr(65 + sum(CHECKSUM_TABLE[index % 2][ALPHANUMERICS_DICT[char]] for index, char in enumerate(value)) % 26)", "def checksum(source_string):\n sum = 0\n countTo = (len(source_string)/2)*2\n count = 0\n while count<countTo:\n thisVal = ord(source_string[count + 1])*256 + ord(source_string[count])\n sum = sum + thisVal\n sum = sum & 0xffffffff # Necessary?\n count = count + 2\n\n if countTo<len(source_string):\n sum = sum + ord(source_string[len(source_string) - 1])\n sum = sum & 0xffffffff # Necessary?\n\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n\n # Swap bytes. 
Bugger me if I know why.\n answer = answer >> 8 | (answer << 8 & 0xff00)\n\n return answer", "def get_hash(content):\n return hashlib.sha1(content).hexdigest()", "def calc_md5(s: Union[bytes, str]) -> str:\n h = hashlib.new(\"md5\")\n\n b = s.encode(\"utf-8\") if isinstance(s, str) else s\n\n h.update(b)\n return h.hexdigest()", "def _calcChecksum(s):\n checksum = 1\n for i in xrange(0, len(s)):\n checksum += ord(s[i])\n checksum &= 0xFF\n return chr(checksum)", "def genHexStr(instr: str) -> str:\n\n return hashlib.md5(instr.encode(\"utf-8\")).hexdigest()", "def checksum(source_string):\n the_sum = 0\n count_to = (len(source_string)/2)*2\n count = 0\n while count < count_to:\n this_val = ord(source_string[count + 1])*256 + ord(source_string[count])\n the_sum = the_sum + this_val\n the_sum = the_sum & 0xffffffff # Necessary?\n count = count + 2\n\n if count_to<len(source_string):\n the_sum = the_sum + ord(source_string[len(source_string) - 1])\n the_sum = the_sum & 0xffffffff # Necessary?\n\n the_sum = (the_sum >> 16) + (the_sum & 0xffff)\n the_sum = the_sum + (the_sum >> 16)\n answer = ~the_sum\n answer = answer & 0xffff\n\n # Swap bytes. Bugger me if I know why.\n answer = answer >> 8 | (answer << 8 & 0xff00)\n\n return answer", "def hash(password):\n result = hashlib.sha1(password.encode())\n # return a hexadecimal digits\n return result.hexdigest()", "def hash_string(input_str):\n input_b = str.encode(input_str)\n input_hash = hashlib.md5(input_b.lower())\n input_hash_str = input_hash.hexdigest()\n\n return input_hash_str", "def checksum(data: str):\n if len(data) % 2 == 1:\n return data\n it = iter(data)\n new_data = ''\n for bit in it:\n if bit == next(it): # two consecutive characters are the same\n new_data += '1'\n else:\n new_data += '0'\n return checksum(new_data)", "def calchash(filename):\n sha = hashlib.sha1()\n with open(filename, 'rb') as f:\n sha.update(f.read())\n return sha", "def sha1(fname):\n fh = open(fname, 'rb')\n sha1 = hashlib.sha1()\n block = fh.read(2 ** 16)\n while len(block) > 0:\n sha1.update(block)\n block = fh.read(2 ** 16)\n\n return sha1.hexdigest()", "def sha1_p(value):\n # check if the value has the expected type\n string_p(value)\n\n # SHA-1 hash has 40 hexadecimal characters\n if not re.fullmatch(r\"^[a-f0-9]{40}$\", value):\n raise Invalid(\"the value '{value}' does not seem to be SHA1 hash\".format(value=value))", "def get_sha1_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m: hashlib._hashlib.HASH = hashlib.sha1()\n return calc_hash(src, m)", "def get_checksum(phrase):\n phrase = phrase.split(\" \")\n wstr = \"\".join(word[:3] for word in phrase)\n wstr = bytearray(wstr.encode('utf-8'))\n z = ((crc32(wstr) & 0xffffffff) ^ 0xffffffff ) >> 0\n z2 = ((z ^ 0xffffffff) >> 0) % len(phrase)\n return phrase[z2]", "def _hash_function(self, x):\n return hashlib.sha1(x).hexdigest()", "def hash(self, string):\n return self.__scaffydb.hash(string)", "def fasthash(string):\r\n md4 = hashlib.new(\"md4\")\r\n md4.update(string)\r\n return md4.hexdigest()", "def digest_mySQL41plus (string):\n\t\n\treturn MHASH ( MHASH_SHA1, MHASH(MHASH_SHA1, string).digest() ).digest()", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def md5(input_string):\n return hashlib.md5(input_string.encode('utf-8')).hexdigest()", "def checksum(data=b\"\"):\n hasher = get_hasher(DEFAULT_CHECKSUM_ALGO)\n 
hasher.update(data)\n return hasher", "def sha1HashFile(self, filename: Path):\n bufferSize = 65536\n sha1Hash = hashlib.sha1()\n\n with filename.open('rb') as f:\n while True:\n data = f.read(bufferSize)\n\n if not data:\n break\n\n sha1Hash.update(data)\n\n return str(sha1Hash.hexdigest())", "def calculate_checksum(source_string):\n countTo = (int(len(source_string) / 2)) * 2\n sum = 0\n count = 0\n\n # Handle bytes in pairs (decoding as short ints)\n loByte = 0\n hiByte = 0\n while count < countTo:\n if (byteorder == \"little\"):\n loByte = source_string[count]\n hiByte = source_string[count + 1]\n else:\n loByte = source_string[count + 1]\n hiByte = source_string[count]\n sum = sum + (ord(hiByte) * 256 + ord(loByte))\n count += 2\n\n # Handle last byte if applicable (odd-number of bytes)\n # Endianness should be irrelevant in this case\n if countTo < len(source_string): # Check for odd length\n loByte = source_string[len(source_string) - 1]\n sum += ord(loByte)\n\n sum &= 0xffffffff # Truncate sum to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n sum = (sum >> 16) + (sum & 0xffff) # Add high 16 bits to low 16 bits\n sum += (sum >> 16) # Add carry from above (if any)\n answer = ~sum & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer", "def hash(plainString):\n result = plainString\n for i in range(0,12):\n result = hashHelp(result)\n return result", "def hexdigest_mySQL41plus (string):\n\t\n\treturn '*' + MHASH ( MHASH_SHA1, MHASH(MHASH_SHA1, string).digest() ).hexdigest()", "def _hash(self, string, hash_type):\n hash_types = {\n 'TABLE_OFFSET': 0,\n 'HASH_A': 1,\n 'HASH_B': 2,\n 'TABLE': 3\n }\n seed1 = 0x7FED7FED\n seed2 = 0xEEEEEEEE\n\n for ch in string.upper():\n if not isinstance(ch, int): ch = ord(ch)\n value = self.encryption_table[(hash_types[hash_type] << 8) + ch]\n seed1 = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n return seed1", "def get_checksum(self):\n return _sha1_hex_upper(\n self['store_name'].value(),\n self.store_password,\n int(self['price'].value())\n )", "def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)", "def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]", "def sha_hash(file_name: str):\n BLOCKSIZE = 65536\n line = '' # format one line for hash\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE) # read each line of doc\n while len(buf) > 0:\n line += buf.decode('utf-8')\n buf = afile.read(BLOCKSIZE)\n\n hex = \"0x\" + sha1(line.encode()) # create sha1 hash\n return int(hex, 0)", "def _get_sha1(file_descriptor):\n sha1 = hashlib.sha1()\n for block in iter(partial(file_descriptor.read, BLOCK_SIZE), ''):\n sha1.update(block)\n file_descriptor.seek(0)\n return sha1.hexdigest()", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def get_256_hash_from_string(string):\n\n sha256 = hashlib.sha256()\n sha256.update(string.encode('utf-8'))\n\n return sha256.hexdigest()", "def md5hash(string):\n return hashlib.md5(string).hexdigest()", "def hash(string):\n hs = 0\n for s in string:\n hs += ord(s)\n return hs", "def computeHash(infile):\n f = open(infile, 'rb')\n buffer = f.read()\n f.close()\n return hashlib.sha1(buffer).hexdigest()", 
"def _get_checksum(self, arg):", "def string_md5(unicode_string):\n return hashlib.md5(unicode_string.encode('utf-8')).hexdigest()", "def hex_sha1_of_stream(input_stream: ReadOnlyStream, content_length: int) -> Sha1HexDigest:\n return Sha1HexDigest(\n update_digest_from_stream(\n hashlib.sha1(),\n input_stream,\n content_length,\n ).hexdigest()\n )", "def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()", "def compute_md5_for_string(string):\n return base64.b64encode(hashlib.md5(string).digest())", "def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def checksum(s):\n result = re.search('\\$(.*)\\*', s) # everything between '$' and '*' (escaped with '\\')\n\n # https://rietman.wordpress.com/2008/09/25/how-to-calculate-the-nmea-checksum/\n # see also https://forum.u-blox.com/index.php/14618/python-generate-checksums-validate-coming-serial-interface\n\n checksum = 0\n for thing in result.group(1):\n checksum = checksum ^ ord(thing) # Xor\n\n ck = hex(0x100 + checksum)[-2:].upper()\n return ck", "def _checksum(source_string):\n if (len(source_string) % 2):\n source_string += \"\\x00\"\n converted = array.array(\"H\", source_string)\n if sys.byteorder == \"big\":\n converted.bytewap()\n val = sum(converted)\n\n val &= 0xffffffff # Truncate val to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n val = (val >> 16) + (val & 0xffff) # Add high 16 bits to low 16 bits\n val += (val >> 16) # Add carry from above (if any)\n answer = ~val & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer", "def str_to_hash(self, param):\n param = param.encode('utf-8')\n my_hash = hashlib.md5(param)\n return my_hash.hexdigest()", "def hash_cli_name(name):\n from hashlib import blake2b\n return blake2b(name.encode(), digest_size=32).hexdigest()", "def sha1(self):\n return self.tag(\"sha1\")", "def checksum(item):\n return hashlib.sha256(obj_to_str(item).encode('utf-8')).hexdigest()", "def checkSum(nmea_string):\n\n # take string after $\n nmea_str = re.sub(r'^\\$(.*)$', r'\\1', nmea_string)\n # clear whitespace\n nmea_str = re.sub(r'\\s', '', nmea_str)\n\n checksum = 0 # initialize\n for b in nmea_str:\n checksum ^= ord(b) # xor\n\n # need to remove the front '0x' from the import hex number\n return(nmea_string + \"*\" +\n re.sub(r'^0x', '', hex(checksum)).zfill(2))", "def checksum(self, **kwargs):\n try:\n # if a secretkey is in **kwargs, use it, and remove it\n secretkey = kwargs['secretkey']\n del kwargs['secretkey']\n except KeyError:\n # if the kwargs lookup fails, get secretkey elsewhere\n secretkey = self.secretkey or resolve_secretkey()\n args = kwargs.items()\n args.sort()\n\n param_string = ''\n for key, value in args:\n param_string += str(key)\n param_string += str(value)\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def sha1(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha1\")", "def sha1hex(doc):\n doc_id = doc.pop('_id',None)\n doc_rev = doc.get('_rev',None)\n doc_string = str(doc)\n\n if doc_id is not None:\n doc['_id'] = doc_id\n\n if doc_rev is not None:\n doc['_rev'] = doc_rev\n\n return hashlib.sha1(doc_string).hexdigest().upper()", "def hash_password(password):\n password_md5 = hashlib.md5(password.encode('utf-8')).hexdigest()\n for i in range(0, len(password_md5), 2):\n if password_md5[i] == '0':\n password_md5 = 
password_md5[0:i] + 'c' + password_md5[i + 1:]\n return password_md5", "def hash(self) -> str:\r\n ...", "def get_md5(string):\r\n byte_string = string.encode(\"utf-8\")\r\n md5 = hashlib.md5()\r\n md5.update(byte_string)\r\n result = md5.hexdigest()\r\n return 'M'+result", "def sha1sum(filename, blocksize=65536):\n hash = hashlib.sha1()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(blocksize), b\"\"):\n hash.update(block)\n return hash.hexdigest()", "def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()", "def _check_hash(self, text):\n old = self.header.get(\"sha1sum\", None)\n if old is None:\n raise crexc.ChecksumError(\"sha1sum is missing in \" + repr(self.basename))\n if self._get_checksum(text) != self.header[\"sha1sum\"]:\n raise crexc.ChecksumError(\"sha1sum mismatch in \" + repr(self.basename))", "def _sha1(self):\n return hashlib.sha1(self._blob).hexdigest()", "def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()", "def test_010(self):\n calculator = checksum.get_checksum_calculator_by_dataone_designator('SHA-1')\n calculator.update('test')\n self.assertTrue(calculator.hexdigest())", "def sha256_2_string(string_to_hash):\n\n # Solution for (1a)\n import hashlib\n first_sha = hashlib.sha256(string_to_hash.encode(\"utf8\"))\n second_sha = hashlib.sha256(first_sha.digest())\n return second_sha.hexdigest()\n\n # Placeholder for (1a)\n return \"deadbeef\"" ]
[ "0.7719917", "0.75582397", "0.727031", "0.71468693", "0.7109886", "0.7050218", "0.704167", "0.7008932", "0.6944196", "0.6943862", "0.69164616", "0.6908371", "0.69008803", "0.6843064", "0.67979276", "0.67816305", "0.6755393", "0.67546755", "0.67312115", "0.6660172", "0.663221", "0.66225", "0.6610004", "0.66008973", "0.6598507", "0.65721923", "0.65559167", "0.6544531", "0.64942604", "0.6479331", "0.6443539", "0.64298064", "0.642465", "0.6423913", "0.64153665", "0.6413367", "0.64050597", "0.63930064", "0.6387411", "0.63654333", "0.63633907", "0.6359112", "0.6348757", "0.63382226", "0.63297623", "0.63294566", "0.6322918", "0.6292572", "0.62821054", "0.62619716", "0.626155", "0.6252075", "0.6217873", "0.62167186", "0.62017566", "0.6188258", "0.6173604", "0.61463803", "0.61409247", "0.61357707", "0.61175746", "0.61100906", "0.610206", "0.6100414", "0.60976434", "0.6087608", "0.60812336", "0.6076247", "0.60733354", "0.60689443", "0.60664076", "0.6062043", "0.6058574", "0.6046943", "0.60420406", "0.6039353", "0.6031185", "0.60022765", "0.59981143", "0.5997681", "0.5991511", "0.59877867", "0.59845024", "0.5981135", "0.5965407", "0.5965224", "0.5936517", "0.5932684", "0.5930827", "0.59304076", "0.59186876", "0.5916242", "0.5898073", "0.5889157", "0.5882236", "0.5878238", "0.5876438", "0.58753395", "0.58738333", "0.58668417" ]
0.79029137
0
Appends .svg to a checksum
def get_filename(checksum): return '%s.svg' % checksum
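A minimal runnable sketch of the positive document above; pairing it with a SHA-1 digest of the SVG bytes is an assumption for illustration only (the row itself does not specify which hash produces the checksum), and the sample XML string is hypothetical.

import hashlib

def get_filename(checksum):
    # Append the .svg extension to a content checksum, as in the positive document above.
    return '%s.svg' % checksum

# Hypothetical usage: derive a checksum from SVG bytes, then build the filename.
xml_string = b'<svg xmlns="http://www.w3.org/2000/svg"></svg>'
checksum = hashlib.sha1(xml_string).hexdigest()
print(get_filename(checksum))  # e.g. '3acba8...svg'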
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _append_svg(self, svg, before_prompt=False):\n self._append_custom(self._insert_svg, svg, before_prompt)", "def __merger_svg(self):\n pass", "def save_svg(xml_string, checksum=None):\n if checksum is None:\n checksum = get_checksum(xml_string) # Get checksum of this file\n existing_url = is_duplicate_checksum(checksum) # Make sure it's unique\n if existing_url is not None: # We've generated this file before.\n logger.info('Duplicate detected for %s' % checksum)\n return existing_url # If dupe_check has a value, it's a URL to an existing (duplicate) file.\n\n # Usually, we've already checked for a duplicate - the above logic is just for cases\n # where we need to generate the checksum on the backend\n filename = get_filename(checksum)\n url = upload_svg(filename, xml_string)\n return url", "def paster_in_svg(self, src, elem):\n loger.info(\"start svg pasting\")\n with open(src) as f:\n tree = etree.parse(f)\n root = tree.getroot()\n element = tree.xpath('image')\n\n if element:\n # Replaces <gco_CharacterString> text\n for key, value in element[0].attrib.iteritems():\n if value == 'avatar':\n # element[0].attrib[key] = os.path.abspath(elem)\n element[0].attrib[key] = \"/home/kryvonis/PycharmProjects/Book_Creator/image_end/1.png\"\n # Save back to the XML file\n etree.ElementTree(root).write(src, pretty_print=True)\n loger.info('svg created - OK')", "def output_svg(self, string_to_output):\n self._output_object.add_report(string_to_output)", "def render_svg(svg):\n b64 = base64.b64encode(svg.encode('utf-8')).decode(\"utf-8\")\n html = r'<img src=\"data:image/svg+xml;base64,%s\"/>' % b64\n st.write(html, unsafe_allow_html=True)", "def send_svg():\n state = request.get_json()\n path = os.path.dirname(__file__).replace('core', 'resources/tmp')\n filename = path + \"/\" + now_date(str=True) + \"-roast.png\"\n cairosvg.svg2png(bytestring=state['svg'], write_to=filename)\n return jsonify({'success': True})", "def _insert_svg(self, cursor, svg):\n try:\n image = svg_to_image(svg)\n except ValueError:\n self._insert_plain_text(cursor, 'Received invalid SVG data.')\n else:\n format = self._add_image(image)\n self._name_to_svg_map[format.name()] = svg\n cursor.insertBlock()\n cursor.insertImage(format)\n cursor.insertBlock()", "def add(self, output_svg: Drawing) -> None:\n pass", "def create_svg(svg_tag, img_width, img_height, out_path):\n script_dir = utils.get_script_dir()\n svg_template_path = utils.join_paths_str(script_dir, \"./templates/template.svg\")\n with open(svg_template_path, \"rt\") as fin:\n with open(out_path, \"wt\") as fout:\n for line in fin:\n fout.write(\n line.replace(\"INSERT_WIDTH\", str(img_width))\n .replace(\"INSERT_HEIGHT\", str(img_height))\n .replace(\"INSERT_OBJECT\", svg_tag)\n )", "def get_svgout(self):\n return tempfile.mktemp(dir=self.tmpdir, suffix='.svg')", "def save_svg(string, file_name):\n file_handle = file(file_name, \"w\")\n file_handle.write(string)\n file_handle.close()", "def create_svg(self, name_dict):\n s = StringIO.StringIO()\n for svg_line in open(self.options.input_file, 'r').readlines():\n # Modify the line to handle replacements from extension GUI\n svg_line = self.expand_extra_vars(svg_line, name_dict)\n # Modify the line to handle variables in svg file\n svg_line = self.expand_vars(svg_line, name_dict)\n s.write(svg_line)\n # Modify the svg to include or exclude groups\n root = etree.fromstring(s.getvalue())\n self.filter_layers(root, name_dict)\n svgout = self.get_svgout()\n try:\n f = open(svgout, 'w')\n f.write(etree.tostring(root,\n 
encoding='utf-8',\n xml_declaration=True))\n except IOError:\n errormsg(_('Cannot open \"' + svgout + '\" for writing'))\n finally:\n f.close()\n s.close()\n return svgout", "def write_footer(out):\n\n out.write(\"\"\" </g>\n</svg>\n\"\"\")", "def svg_mask_image(output_name, img_name, mask_name, size, img_write_method, use_gzip):\n with get_writer(output_name, use_gzip) as f:\n f.write(svg_preamble)\n f.write(svg_doctype)\n f.write('<svg width=\"{}\" height=\"{}\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\\n'.format(size[0], size[1]))\n write_mask(f, mask_name, size, img_write_method)\n write_img(f, img_name, size, img_write_method)\n f.write('</svg>\\n')", "def upload_svg(filename, xml_string):\n s3 = boto3.client('s3')\n response = s3.put_object(\n ACL='public-read',\n Body=xml_string,\n Bucket=BUCKET,\n Key=filename,\n StorageClass='REDUCED_REDUNDANCY',\n )\n\n return 'https://s3.amazonaws.com/%s/%s' % (BUCKET, filename)", "def _repr_svg_(self):\n pass", "def output_svg(lines, regressions, requested_width, requested_height):\n \n (global_min_x, _), (global_max_x, global_max_y) = bounds(lines)\n max_up_slope, min_down_slope = bounds_slope(regressions)\n \n #output\n global_min_y = 0\n x = global_min_x\n y = global_min_y\n w = global_max_x - global_min_x\n h = global_max_y - global_min_y\n font_size = 16\n line_width = 2\n \n pic_width, pic_height = compute_size(requested_width, requested_height\n , w, h)\n \n def cw(w1):\n \"\"\"Converts a revision difference to display width.\"\"\"\n return (pic_width / float(w)) * w1\n def cx(x):\n \"\"\"Converts a revision to a horizontal display position.\"\"\"\n return cw(x - global_min_x)\n\n def ch(h1):\n \"\"\"Converts a time difference to a display height.\"\"\"\n return -(pic_height / float(h)) * h1\n def cy(y):\n \"\"\"Converts a time to a vertical display position.\"\"\"\n return pic_height + ch(y - global_min_y)\n \n print '<!--Picture height %.2f corresponds to bench value %.2f.-->' % (\n pic_height, h)\n print '<svg',\n print 'width=%s' % qa(str(pic_width)+'px')\n print 'height=%s' % qa(str(pic_height)+'px')\n print 'viewBox=\"0 0 %s %s\"' % (str(pic_width), str(pic_height))\n print 'onclick=%s' % qa(\n \"var event = arguments[0] || window.event;\"\n \" if (event.shiftKey) { highlightRevision(null); }\"\n \" if (event.ctrlKey) { highlight(null); }\"\n \" return false;\")\n print 'xmlns=\"http://www.w3.org/2000/svg\"'\n print 'xmlns:xlink=\"http://www.w3.org/1999/xlink\">'\n \n print \"\"\"\n<defs>\n <marker id=\"circleMark\"\n viewBox=\"0 0 2 2\" refX=\"1\" refY=\"1\"\n markerUnits=\"strokeWidth\"\n markerWidth=\"2\" markerHeight=\"2\"\n orient=\"0\">\n <circle cx=\"1\" cy=\"1\" r=\"1\"/>\n </marker>\n</defs>\"\"\"\n \n #output the revisions\n print \"\"\"\n<script type=\"text/javascript\">//<![CDATA[\n var previousRevision;\n var previousRevisionFill;\n var previousRevisionStroke\n function highlightRevision(id) {\n if (previousRevision == id) return;\n\n document.getElementById('revision').firstChild.nodeValue = 'r' + id;\n document.getElementById('rev_link').setAttribute('xlink:href',\n 'http://code.google.com/p/skia/source/detail?r=' + id);\n \n var preRevision = document.getElementById(previousRevision);\n if (preRevision) {\n preRevision.setAttributeNS(null,'fill', previousRevisionFill);\n preRevision.setAttributeNS(null,'stroke', previousRevisionStroke);\n }\n \n var revision = document.getElementById(id);\n previousRevision = id;\n if (revision) {\n previousRevisionFill = 
revision.getAttributeNS(null,'fill');\n revision.setAttributeNS(null,'fill','rgb(100%, 95%, 95%)');\n \n previousRevisionStroke = revision.getAttributeNS(null,'stroke');\n revision.setAttributeNS(null,'stroke','rgb(100%, 90%, 90%)');\n }\n }\n//]]></script>\"\"\"\n \n def print_rect(x, y, w, h, revision):\n \"\"\"Outputs a revision rectangle in display space,\n taking arguments in revision space.\"\"\"\n disp_y = cy(y)\n disp_h = ch(h)\n if disp_h < 0:\n disp_y += disp_h\n disp_h = -disp_h\n \n print '<rect id=%s x=%s y=%s' % (qa(revision), qa(cx(x)), qa(disp_y),),\n print 'width=%s height=%s' % (qa(cw(w)), qa(disp_h),),\n print 'fill=\"white\"',\n print 'stroke=\"rgb(98%%,98%%,88%%)\" stroke-width=%s' % qa(line_width),\n print 'onmouseover=%s' % qa(\n \"var event = arguments[0] || window.event;\"\n \" if (event.shiftKey) {\"\n \" highlightRevision('\"+str(revision)+\"');\"\n \" return false;\"\n \" }\"),\n print ' />'\n \n xes = set()\n for line in lines.itervalues():\n for point in line:\n xes.add(point[0])\n revisions = list(xes)\n revisions.sort()\n \n left = x\n current_revision = revisions[0]\n for next_revision in revisions[1:]:\n width = (((next_revision - current_revision) / 2.0)\n + (current_revision - left))\n print_rect(left, y, width, h, current_revision)\n left += width\n current_revision = next_revision\n print_rect(left, y, x+w - left, h, current_revision)\n\n #output the lines\n print \"\"\"\n<script type=\"text/javascript\">//<![CDATA[\n var previous;\n var previousColor;\n var previousOpacity;\n function highlight(id) {\n if (previous == id) return;\n\n document.getElementById('label').firstChild.nodeValue = id;\n\n var preGroup = document.getElementById(previous);\n if (preGroup) {\n var preLine = document.getElementById(previous+'_line');\n preLine.setAttributeNS(null,'stroke', previousColor);\n preLine.setAttributeNS(null,'opacity', previousOpacity);\n\n var preSlope = document.getElementById(previous+'_linear');\n if (preSlope) {\n preSlope.setAttributeNS(null,'visibility', 'hidden');\n }\n }\n\n var group = document.getElementById(id);\n previous = id;\n if (group) {\n group.parentNode.appendChild(group);\n \n var line = document.getElementById(id+'_line');\n previousColor = line.getAttributeNS(null,'stroke');\n previousOpacity = line.getAttributeNS(null,'opacity');\n line.setAttributeNS(null,'stroke', 'blue');\n line.setAttributeNS(null,'opacity', '1');\n \n var slope = document.getElementById(id+'_linear');\n if (slope) {\n slope.setAttributeNS(null,'visibility', 'visible');\n }\n }\n }\n//]]></script>\"\"\"\n for label, line in lines.items():\n print '<g id=%s>' % qa(label)\n r = 128\n g = 128\n b = 128\n a = .10\n if label in regressions:\n regression = regressions[label]\n min_slope = regression.find_min_slope()\n if min_slope < 0:\n d = max(0, (min_slope / min_down_slope))\n g += int(d*128)\n a += d*0.9\n elif min_slope > 0:\n d = max(0, (min_slope / max_up_slope))\n r += int(d*128)\n a += d*0.9\n \n slope = regression.slope\n intercept = regression.intercept\n min_x = regression.min_x\n max_x = regression.max_x\n print '<polyline id=%s' % qa(str(label)+'_linear'),\n print 'fill=\"none\" stroke=\"yellow\"',\n print 'stroke-width=%s' % qa(abs(ch(regression.serror*2))),\n print 'opacity=\"0.5\" pointer-events=\"none\" visibility=\"hidden\"',\n print 'points=\"',\n print '%s,%s' % (str(cx(min_x)), str(cy(slope*min_x + intercept))),\n print '%s,%s' % (str(cx(max_x)), str(cy(slope*max_x + intercept))),\n print '\"/>'\n \n print '<polyline id=%s' % 
qa(str(label)+'_line'),\n print 'onmouseover=%s' % qa(\n \"var event = arguments[0] || window.event;\"\n \" if (event.ctrlKey) {\"\n \" highlight('\"+str(label).replace(\"'\", \"\\\\'\")+\"');\"\n \" return false;\"\n \" }\"),\n print 'fill=\"none\" stroke=\"rgb(%s,%s,%s)\"' % (str(r), str(g), str(b)),\n print 'stroke-width=%s' % qa(line_width),\n print 'opacity=%s' % qa(a),\n print 'points=\"',\n for point in line:\n print '%s,%s' % (str(cx(point[0])), str(cy(point[1]))),\n print '\"/>'\n\n print '</g>'\n\n #output the labels\n print '<text id=\"label\" x=\"0\" y=%s' % qa(font_size),\n print 'font-size=%s> </text>' % qa(font_size)\n\n print '<a id=\"rev_link\" xlink:href=\"\" target=\"_top\">'\n print '<text id=\"revision\" x=\"0\" y=%s style=\"' % qa(font_size*2)\n print 'font-size: %s; ' % qe(font_size)\n print 'stroke: #0000dd; text-decoration: underline; '\n print '\"> </text></a>'\n\n print '</svg>'", "def fix_svg(svg):\n xml = ET.fromstring(svg)\n for x in xml.findall('path'):\n x.attrib['fill'] = '#ffffff'\n x.attrib['stroke-width'] = '1'\n x.attrib['opacity'] = '1'\n x.attrib['stroke'] = '#ff0000'\n return ET.tostring(xml)", "def save(filename, canvas):\n data = write_svg.to_string(canvas).encode('utf-8')\n with gzip.open(filename, 'wb') as f:\n f.write(data)", "def post_process_svg(self):\n post_processor = PostProcessor(svg_path=self.rendered_file_path)\n\n post_processor.post_process(graph_representation=self.graph_representation)\n\n post_processor.write()\n\n self.display.display(\"The graph has been exported to {}\".format(self.rendered_file_path))\n\n return self.rendered_file_path", "def getHash(self, hashtype='sha1'):\n if not self.svghash256:\n blob_reader = blobstore.BlobReader(self.svgBlob)\n digest = hashlib.sha256(blob_reader.read()).digest()\n self.svghash256 = \"sha256-%s\" % (base64.b64encode(digest))\n self.put() # write back hash\n if not self.svghash:\n blob_reader = blobstore.BlobReader(self.svgBlob)\n digest = hashlib.sha1(blob_reader.read()).digest()\n self.svghash = \"sha1-%s\" % (base64.b64encode(digest))\n self.put() # write back hash\n if hashtype=='sha1':\n return \"%s\" % (self.svghash)\n elif hashtype == 'sha256':\n return \"%s\" % (self.svghash256)\n elif hashtype == 'both':\n return \"%s %s\" % (self.svghash,self.svghash256)", "def save(self, *args, **kwargs):\n if self.icon:\n xml = svg.validate_svg(self.icon.file.read())\n square = svg.make_square(xml)\n colors = svg.color_icon(square)\n super(Issue, self).save(*args, **kwargs)\n for key, content in colors.items():\n filename = self.icon_color(key)\n if self.icon.storage.exists(filename):\n self.icon.storage.delete(filename)\n self.icon.storage.save(filename, svg.as_file(content))\n else:\n super(Issue, self).save(*args, **kwargs)", "def svg2png (fName, width=600, app=None, oFilename=\"\"):\n from PyQt5.QtSvg import QSvgRenderer\n from PyQt5.QtGui import QImage, QPainter, QColor, QGuiApplication\n from math import sqrt\n\n if not app:\n app=QGuiApplication([])\n svg, w, h = openSVG(fName)\n groups = svg.getElementsByTagName(\"g\")\n scale = width/w\n for g in groups:\n if \"stroke-width\" in g.attributes:\n g.setAttribute(\"stroke-width\", str(float(g.getAttribute(\"stroke-width\"))/sqrt(scale)))\n qsr=QSvgRenderer(svg.toxml().encode(\"utf-8\"))\n img=QImage(int(w*scale), int(h*scale), QImage.Format_ARGB32)\n img.fill(QColor(\"white\"))\n p=QPainter(img)\n qsr.render(p)\n p.end()\n if not oFilename:\n oFilename = re.sub(r\"\\.svg$\", f\"-{width}px.png\", fName)\n img.save(oFilename)\n return 
oFilename", "def sauver_svg(pcanvas,pgrille,pliste_cubes,pcube_visu):\n global grille\n grille = pgrille\n\n fichier = filedialog.asksaveasfilename(\n defaultextension=\".svg\",\n filetypes=((\"SVG files\", \".svg\"),(\"All files\", \".*\")))\n if not fichier:\n return # l'utilisateur a annule ou ferme la fenetre\n try:\n f = open(fichier, \"w\", encoding = \"utf-8\")\n except FileNotFoundError:\n messagebox.showerror(title=\"Error\",\n message=\"Erreur fichier non trouvé\")\n except IOError:\n messagebox.showerror(title=\"Error\",\n message=\"Le fichier n'existe pas\")\n else:\n # Ecriture du header xml, puis d'une viewbox,\n # qui est en realite comme notre canvas\n f.write(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n + \"<!DOCTYPE svg PUBLIC \\\"-//W3C//DTD SVG 1.1//EN\\\" \"\n + \"\\\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\\\">\\n\")\n f.write(\"<svg viewBox=\" + \"\\\"0 0 \"\n + str(pcanvas.cget(\"width\")) + \" \"\n + str(pcanvas.cget(\"height\")) + \"\\\" \"\n + \"xmlns=\\\"http://www.w3.org/2000/svg\\\">\\n\")\n _dessiner_grille_svg(f)\n \n # pas optimise, mais facultatif\n \n liste = pcanvas.find_all()[grille.taille_x+grille.taille_y+3:]\n # a partir du nb de lignes+1 jusqu'a la fin : les faces des cubes\n # note : les id commencent a 1\n \n for i in range(0,len(liste),3):\n if liste[i] != pcube_visu.id:\n # on a un id de cube, il nous faut l'objet pour ses coordonnees\n for c in pliste_cubes:\n if c.id == liste[i]:\n cube = c\n break\n # cube est le cube correspondant a l'id i\n coords2D = grille.canvas_to_grille(cube.coords)\n _dessiner_cube_svg(coords2D,f,cube.h,cube.couleur)\n f.write(\"</svg>\")\n f.close()", "def show_svg(tmp_path = DEFAULT_PATH): \n global show_counter\n file_name = tmp_path + \"show_tmp_file_{}.svg\".format(show_counter)\n plt.savefig(file_name)\n os.system(\"open {}\".format(file_name))\n show_counter += 1\n plt.close()", "def getSvgHtml(svgFile, width, height):\n html = '<object type=\"image/svg+xml\" data=\"%s\" width=\"%s\" height=\"%s\"/>'\n return html % (svgFile, width, height)", "def save_canvas_svg(self, filename):\n canvasvg.saveall(filename, self.canvas)", "def post_drawing_svg(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request(HttpRequest, 'POST', 'file')", "def _add_checksum(self, dct):\n md5_cksum = None", "def append_checksum(file_path):\n\n cksum = calc_file_crc32(file_path)\n f = open(file_path, 'ab')\n f.write(struct.pack('<I', cksum))\n f.close()", "def save_svg(self, dx_nodes):\r\n\r\n for _dx_node in dx_nodes:\r\n self.dwg.add(_dx_node.svg_node)\r\n\r\n self.dwg.save()", "def post(self):\n try:\n parser = reqparse.RequestParser()\n parser.add_argument(\"svg\")\n args = parser.parse_args()\n data = args[\"svg\"]\n\n subprocess.run([\"axicli\", \"--mode\", \"manual\", \"-M\", \"enable_xy\"])\n subprocess.run([\"axicli\", \"--mode\", \"manual\", \"-M\", \"raise_pen\"])\n with tempfile.NamedTemporaryFile(dir=\"C:\\\\crap\", delete=False) as fp:\n fp.write(data.encode())\n fp.seek(0)\n print(fp.name)\n time.sleep(1)\n subprocess.run([\"axicli\", fp.name])\n\n return self.get()\n\n except:\n abort(500, reason=traceback.format_exc())", "def _make_svg_script(self):\n self.script = '''\n var rLabel = \"%(label)s\";\n var report = addReport(350, 200, rLabel, \"\");\n ''' % {'label': self.label}\n\n self.script += '''\n var rsUri = \"%(instance_endpoint)s?_uri=%(uri_encoded)s\";\n var rsLabel = \"%(label)s\";\n var repSystem = addReportingSystem(350, 20, rsLabel, 
rsUri);\n addLink(report, repSystem, \"proms:reportingSystem\", RIGHT);\n ''' % {\n 'instance_endpoint': self.endpoints['instance'],\n 'uri_encoded': self.rs_encoded,\n 'label': self.rs_label\n }\n\n if self.sa is not None and self.ea is not None:\n if self.sa == self.ea:\n # External Report -- single Activity\n self.script += '''\n var uri = \"%(instance_endpoint)s?_uri=%(uri_encoded)s\";\n var label = \"%(label)s\";\n var activity = addActivity(50, 200, label, uri);\n addLink(report, activity, \"proms:startingActivity\", TOP);\n addLink(report, activity, \"proms:endingActivity\", BOTTOM);\n ''' % {\n 'instance_endpoint': self.endpoints['instance'],\n 'uri_encoded': urllib.parse.quote(self.sa),\n 'label': self.sa_label\n }\n else:\n # Internal Report -- 2 Activities\n self.script += '''\n var saUri = \"%(instance_endpoint)s?_uri=%(uri_encoded)s\";\n var saLabel = \"%(label)s\";\n var sacActivity = addActivity(50, 120, sacLabel, sacUri);\n addLink(report, sacActivity, \"proms:startingActivity\", TOP);\n ''' % {\n 'instance_endpoint': self.endpoints['instance'],\n 'uri_encoded': urllib.parse.quote(self.sa),\n 'label': self.sa_label\n }\n\n self.script += '''\n var eacUri = \"%(instance_endpoint)s?_uri=%(uri_encoded)s\";\n var eacLabel = \"%(label)s\";\n var eacActivity = addActivity(50, 280, eacLabel, eacUri);\n addLink(report, eacActivity, \"proms:endingActivity\", BOTTOM);\n ''' % {\n 'instance_endpoint': self.endpoints['instance'],\n 'uri_encoded': urllib.parse.quote(self.ea),\n 'label': self.ea_label\n }\n else:\n # Basic Report -- no Activities\n pass", "def create_svg_name(self):\n for l in self.data:\n d = self.get_line_desc(l)\n self.svgouts[tuple(l)] = self.create_svg(d)", "def gen_symbols(path, strip):\n\n symbols = ''\n svg_namespace = 'http://www.w3.org/2000/svg'\n etree.register_namespace('', svg_namespace)\n\n for root, dirs, files in os.walk(os.path.abspath(path)):\n for wwsfile in files:\n basename, extension = os.path.splitext(wwsfile)\n if extension == '.svg':\n filepath = os.path.join(root, wwsfile)\n try:\n svg = etree.parse(filepath)\n svg_root = svg.getroot()\n\n attribs = svg_root.attrib\n desc = svg.find('{'+svg_namespace+'}desc')\n svg_root.remove(desc)\n title = svg.find('{'+svg_namespace+'}title')\n svg_root.remove(title)\n metadata = svg.find('{'+svg_namespace+'}metadata')\n svg_root.remove(metadata)\n\n viewbox_attrib = 'viewBox'\n if viewbox_attrib in attribs:\n viewbox = attribs[viewbox_attrib]\n else:\n viewbox = f\"0 0 {attribs['width']} {attribs['height']}\"\n\n basename2 = basename.replace(strip, '')\n symbols += f'<symbol id=\"{basename2}\" viewBox=\"{viewbox}\">'\n\n for element in svg_root:\n symbols += etree.tostring(element).decode('utf-8')\n symbols += '</symbol>'\n\n except Exception as err:\n warnings.warn(f'Could not parse file {filepath}: {err}')\n\n return symbols", "def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)", "def export_as_svg(self):\n from ExportCommand import ExportCommand\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_path, _ = QFileDialog.getSaveFileName(self, \"Export as svg\", os.getcwd(), \"svg file(*.svg)\",\n options=options)\n if file_path:\n cmd = ExportCommand(self.graphicsView.scene(), 'svg')\n cmd.display_message.connect(self.onAddMessage)\n if cmd.execute(file_path):\n QMessageBox.information(self, 
self.tr('Information'), self.tr('Successfully export to svg file'))\n else:\n QMessageBox.information(self, self.tr('Error'), self.tr('Fail to export to svg file'))", "def dvi_to_svg(dvi_file, regen_if_exists=False):\n result = dvi_file.replace(\".dvi\", \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n dvi_file,\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n result,\n \">\",\n get_null()\n ]\n os.system(\" \".join(commands))\n return result", "def image_svg():\n data = resource(\"images/svg_logo.svg\")\n return Response(data, headers={\"Content-Type\": \"image/svg+xml\"})", "def json2svg(json_f, path_out, pathway_iri, wp_id, pathway_version, theme):\n\n dir_out = path.dirname(path_out)\n # example base_out: 'WP4542.svg'\n base_out = path.basename(path_out)\n [stub_out, ext_out_with_dot] = path.splitext(base_out)\n\n pvjs_cmd = f\"pvjs --theme {theme}\"\n with open(json_f, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n pvjs_ps = subprocess.Popen(\n shlex.split(pvjs_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n pvjs_ps.communicate()[0]\n\n tree = ET.parse(path_out, parser=parser)\n root = tree.getroot()\n\n #############################\n # SVG > .svg\n #############################\n\n # TODO: make the stand-alone SVGs work for upload to WM Commons:\n # https://www.mediawiki.org/wiki/Manual:Coding_conventions/SVG\n # https://commons.wikimedia.org/wiki/Help:SVG\n # https://commons.wikimedia.org/wiki/Commons:Commons_SVG_Checker?withJS=MediaWiki:CommonsSvgChecker.js\n # W3 validator: http://validator.w3.org/#validate_by_upload+with_options\n\n # WM says: \"the recommended image height is around 400–600 pixels. When a\n # user views the full size image, a width of 600–800 pixels gives\n # them a good close-up view\"\n # https://commons.wikimedia.org/wiki/Help:SVG#Frequently_asked_questions\n root.set(\"width\", \"800px\")\n root.set(\"height\", \"600px\")\n\n # TODO: verify that all of the following cases are now correctly handled in pvjs\n for style_el in root.findall(\".//style\"):\n if not style_el.text == \"\":\n raise Exception(\"Expected empty style sheets.\")\n for el in root.findall(\".//pattern[@id='PatternQ47512']\"):\n raise Exception(\"Unexpected pattern.\")\n\n edge_warning_sent = False\n for el in root.xpath(\n \".//svg:g/svg:g[contains(@class,'Edge')]/svg:g\", namespaces=SVG_NS\n ):\n if not edge_warning_sent:\n print(\"TODO: update pvjs to avoid having nested g elements for edges.\")\n edge_warning_sent = True\n # raise Exception(\"Unexpected nested g element for edge.\")\n\n for el in root.xpath(\n \"/svg:svg/svg:g/svg:g[contains(@class,'Edge')]/svg:path/@style\",\n namespaces=SVG_NS,\n ):\n raise Exception(\n \"Unexpected style attribute on path element for edge.\", namespaces=SVG_NS\n )\n\n for el in root.xpath(\n \"/svg:svg/svg:defs/svg:g[@id='jic-defs']/svg:svg/svg:defs\", namespaces=SVG_NS\n ):\n raise Exception(\"Unexpected nested svg for defs.\")\n\n for el in root.findall(\".//defs/g[@id='jic-defs']/svg/defs\"):\n raise Exception(\"Unexpected nested svg for defs.\")\n\n for el in root.xpath(\n \".//svg:g/svg:g[contains(@class,'Edge')]/svg:path/@style\", namespaces=SVG_NS\n ):\n raise Exception(\"Unexpected style attribute on path element for edge.\")\n\n # TODO: should any of this be in pvjs instead?\n style_selector = (\n \"[@style='color:inherit;fill:inherit;fill-opacity:inherit;stroke:inherit;stroke-width:inherit']\"\n )\n for el_parent in root.findall(f\".//*{style_selector}/..\"):\n stroke_width = 
el_parent.attrib.get(\"stroke-width\", 1)\n for el in root.findall(f\".//*{style_selector}\"):\n el.set(\n \"style\",\n f\"color:inherit;fill:inherit;fill-opacity:inherit;stroke:inherit;stroke-width:{str(stroke_width)}\",\n )\n\n for el in root.findall(\".//*[@filter='url(#kaavioblackto000000filter)']\"):\n el.attrib.pop(\"filter\", None)\n\n for image_parent in root.findall(\".//*image/..\"):\n images = image_parent.findall(\"image\")\n for image in images:\n image_parent.remove(image)\n\n # TODO: do the attributes \"filter\" \"fill\" \"fill-opacity\" \"stroke\" \"stroke-dasharray\" \"stroke-width\"\n # on the top-level g element apply to the g elements for edges?\n\n # TODO: do the attributes \"color\" \"fill\" \"fill-opacity\" \"stroke\" \"stroke-dasharray\" \"stroke-width\"\n # on the top-level g element apply to the path elements for edges?\n\n # TODO: Which of the following is correct?\n # To make the SVG file independent of Arial, change all occurrences of\n # font-family: Arial to font-family: 'Liberation Sans', Arial, sans-serif\n # https://commons.wikimedia.org/wiki/Help:SVG#fallback\n # vs.\n # Phab:T64987, Phab:T184369, Gnome #95; font-family=\"'font name'\"\n # (internally quoted font family name) does not work\n # (File:Mathematical_implication_diagram-alt.svg, File:T184369.svg)\n # https://commons.wikimedia.org/wiki/Commons:Commons_SVG_Checker?withJS=MediaWiki:CommonsSvgChecker.js\n\n # Liberation Sans is the open replacement for Arial, but its kerning\n # has some issues, at least as processed by librsvg.\n # An alternative that is also supported MW is DejaVu Sans. Using\n # transform=\"scale(0.92,0.98)\"\n # might yield better kerning and take up about the same amount of space.\n\n # Long-term, should we switch our default font from Arial to something prettier?\n # It would have to be a well-supported font.\n # This page <https://commons.wikimedia.org/wiki/Help:SVG#fallback> says:\n # On Commons, librsvg has the fonts listed in:\n # https://meta.wikimedia.org/wiki/SVG_fonts#Latin_(basic)_fonts_comparison\n # ...\n # In graphic illustrations metric exact text elements are often important\n # and Arial can be seen as de-facto standard for such a feature.\n\n for el in root.xpath(\".//*[contains(@font-family,'Arial')]\", namespaces=SVG_NS):\n el.set(\"font-family\", \"'Liberation Sans', Arial, sans-serif\")\n\n # TODO: do we need to specify fill=currentColor for any elements?\n\n for el in root.xpath(\".//svg:defs//svg:marker//*[not(@fill)]\", namespaces=SVG_NS):\n el.set(\"fill\", \"currentColor\")\n\n for el in root.xpath(\".//svg:text[@stroke-width='0.05px']\", namespaces=SVG_NS):\n el.attrib.pop(\"stroke-width\", None)\n\n for el in root.xpath(\".//svg:text[@overflow]\", namespaces=SVG_NS):\n el.attrib.pop(\"overflow\", None)\n\n for el in root.xpath(\".//svg:text[@dominant-baseline]\", namespaces=SVG_NS):\n el.attrib.pop(\"dominant-baseline\", None)\n\n for el in root.xpath(\".//svg:text[@clip-path]\", namespaces=SVG_NS):\n el.attrib.pop(\"clip-path\", None)\n\n FONT_SIZE_RE = re.compile(r\"^([0-9.]*)px$\")\n # TRANSLATE_RE = re.compile(r\"^translate[(]([0-9.]*),([0-9.]*)[)]$\")\n TRANSLATE_RE = re.compile(r\"^translate\\(([0-9.]*),([0-9.]*)\\)$\")\n # We are pushing the text down based on font size.\n # This is needed because librsvg doesn't support attribute \"alignment-baseline\".\n\n for el in root.xpath(\".//svg:text[@font-size]\", namespaces=SVG_NS):\n font_size_full = el.attrib.get(\"font-size\")\n font_size_matches = re.search(FONT_SIZE_RE, font_size_full)\n if 
font_size_matches:\n font_size = float(font_size_matches.group(1))\n\n if not font_size:\n font_size = 5\n\n x_translation = None\n y_translation = None\n transform_full = el.attrib.get(\"transform\")\n if transform_full:\n translate_matches = re.search(TRANSLATE_RE, transform_full)\n if translate_matches:\n x_translation = float(translate_matches.group(1))\n y_translation_uncorrected = float(translate_matches.group(2))\n\n if not x_translation:\n x_translation = 0\n y_translation_uncorrected = 0\n\n y_translation_corrected = font_size / 3 + y_translation_uncorrected\n el.set(\"transform\", f\"translate({x_translation},{y_translation_corrected})\")\n\n # Add link outs\n WIKIDATA_CLASS_RE = re.compile(\"Wikidata_Q[0-9]+\")\n for el in root.xpath(\".//*[contains(@class,'DataNode')]\", namespaces=SVG_NS):\n wikidata_classes = list(\n filter(WIKIDATA_CLASS_RE.match, el.attrib.get(\"class\").split(\" \"))\n )\n if len(wikidata_classes) > 0:\n # if there are multiple, we just link out to the first\n wikidata_id = wikidata_classes[0].replace(\"Wikidata_\", \"\")\n el.tag = \"{http://www.w3.org/2000/svg}a\"\n # linkout_base = \"https://www.wikidata.org/wiki/\"\n linkout_base = \"https://scholia.toolforge.org/\"\n el.set(\"{http://www.w3.org/1999/xlink}href\", linkout_base + wikidata_id)\n\n # make linkout open in new tab/window\n el.set(\"target\", \"_blank\")\n\n ###########\n # Run SVGO\n ###########\n\n pre_svgo_svg_f = f\"{dir_out}/{stub_out}.pre_svgo.svg\"\n tree.write(pre_svgo_svg_f)\n\n tree.write(path_out)\n args = shlex.split(\n f'svgo --multipass --config \"{SCRIPT_DIR}/svgo-config.json\" {path_out}'\n )\n subprocess.run(args)\n\n #########################################\n # Future enhancements for pretty version\n #########################################\n\n # TODO: convert the following bash code into Python\n\n # Glyphs from reactome\n # TODO: how about using these: https://reactome.org/icon-lib\n # for example, mitochondrion: https://reactome.org/icon-lib?f=cell_elements#Mitochondrion.svg\n # They appear to be CC-4.0, which might mean we can't upload them to WM Commons?\n\n # Glyphs from SMILES\n # metabolite_patterns_css_f = (\n # f\"{dir_out}/{bare_stub_out}.metabolite-patterns-uri.css\"\n # )\n # metabolite_patterns_svg_f = (\n # f\"{dir_out}/{bare_stub_out}.metabolite-patterns-uri.svg\"\n # )\n #\n # if path.exists(metabolite_patterns_svg_f) and path.exists(\n # metabolite_patterns_css_f\n # ):\n # print(\n # f\"{metabolite_patterns_svg_f} & {metabolite_patterns_css_f} already exist. 
To overwrite, delete them & try again.\"\n # )\n # else:\n # # If only one of them exists, we recreate both\n # if path.exists(metabolite_patterns_svg_f):\n # os.remove(metabolite_patterns_svg_f)\n # elif path.exists(metabolite_patterns_css_f):\n # os.remove(metabolite_patterns_css_f)\n #\n # metabolite_patterns_svg_tree = ET.parse(\n # \"<svg><defs></defs></svg>\", parser=parser\n # )\n # metabolite_patterns_svg_root = metabolite_patterns_svg_tree.getroot()\n #\n # # TODO convert the following sh script to Python\n # \"\"\"\n # jq -r '[.entitiesById[] | select(.type | contains([\"Metabolite\"]))] | unique_by(.type)[] | [.xrefDataSource, .xrefIdentifier, [.type[] | select(startswith(\"wikidata:\"))][0], [.type[] | select(startswith(\"hmdb:\") and length == 14)][0]] | @tsv' \"$json_f\" | \\\n # while IFS=$'\\t' read -r data_source identifier wikidata_id hmdb_id; do\n # wikidata_identifier=$(echo \"$wikidata_id\" | sed 's/wikidata://');\n # bridgedb_request_uri=\"http://webservice.bridgedb.org/Human/attributes/$data_source/$identifier?attrName=SMILES\"\n # if [ -z \"$data_source\" ] || [ -z \"$identifier\" ]; then\n # echo \"Missing Xref data source and/or identifier in $stub_out\";\n # continue;\n # fi\n #\n # smiles=$(curl -Ls \"$bridgedb_request_uri\")\n # bridgedb_request_status=$?\n #\n # if [ \"$bridgedb_request_status\" != 0 ] || [ -z \"$smiles\" ] || [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # # if [ \"$bridgedb_request_status\" != 0 ]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri (status code: $bridgedb_request_status)\";\n # # elif [ -z \"$smiles\" ]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri (nothing returned)\";\n # # elif [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri\";\n # # echo '(The server has not found anything matching the request URI)'\n # # fi\n #\n # # If the DataSource and Identifier specified don't get us a SMILES string,\n # # it could be because BridgeDb doesn't support queries for that DataSource.\n # # For example, WP396_97382 has a DataNode with PubChem-compound:3081372,\n # # http://webservice.bridgedb.org/Human/attributes/PubChem-compound/3081372?attrName=SMILES\n # # doesn't return anything. However, that DataNode can be mapped to HMDB:HMDB61196, and\n # # the url http://webservice.bridgedb.org/Human/attributes/HMDB/HMDB61196\n # # does return a SMILES string.\n # # Note that BridgeDb currently requires us to use the 5 digit HMDB identifier,\n # # even though there is another format that uses more digits.\n #\n # if [ ! 
-z \"$hmdb_id\" ]; then\n # hmdb_identifier=\"HMDB\"${hmdb_id:(-5)};\n # bridgedb_request_uri_orig=\"$bridgedb_request_uri\"\n # bridgedb_request_uri=\"http://webservice.bridgedb.org/Human/attributes/HMDB/$hmdb_identifier?attrName=SMILES\"\n # #echo \"Trying alternate bridgedb_request_uri: $bridgedb_request_uri\"\n # smiles=$(curl -Ls \"$bridgedb_request_uri\")\n # bridgedb_request_status=$?\n # if [ \"$bridgedb_request_status\" != 0 ]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri (status code: $bridgedb_request_status)\";\n # continue;\n # elif [ -z \"$smiles\" ]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri (nothing returned)\";\n # continue;\n # elif [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri\";\n # echo '(The server has not found anything matching the request URI)'\n # continue;\n # fi\n # else\n # continue;\n # fi\n # fi\n #\n # smiles_url_encoded=$(echo \"$smiles\" | jq -Rr '@uri')\n # cdkdepict_url=\"http://www.simolecule.com/cdkdepict/depict/bow/svg?smi=$smiles_url_encoded&abbr=on&hdisp=bridgehead&showtitle=false&zoom=1.0&annotate=none\"\n #\n # cat >> \"$css_out\" <<EOF\n # [typeof~=\"wikidata:$wikidata_identifier\"]:hover > .Icon {\n # cursor: default;\n # fill: url(#Pattern$wikidata_identifier);\n # transform-box: fill-box;\n # transform: scale(2, 3);\n # transform-origin: 50% 50%;\n # }\n # [typeof~=\"wikidata:$wikidata_identifier\"]:hover > .Text {\n # font-size: 0px;\n # }\n # EOF\n #\n # # TODO: do we want to disable the clip-path on hover?\n # #[typeof~=wikidata:$wikidata_identifier]:hover > .Icon {\n # # clip-path: unset;\n # # rx: unset;\n # # ry: unset;\n # # cursor: default;\n # # fill: url(#Pattern$wikidata_identifier);\n # # transform-box: fill-box;\n # # transform: scale(2, 3);\n # # transform-origin: 50% 50%;\n # #}\n #\n # # \"transform-box: fill-box\" is needed for FF.\n # # https://bugzilla.mozilla.org/show_bug.cgi?id=1209061\n #\n # xmlstarlet ed -L \\\n # -s \"/svg/defs\" -t elem -n \"pattern\" -v \"\" \\\n # --var prevpattern '$prev' \\\n # -s '$prevpattern' -t elem -n \"image\" -v \"\" \\\n # --var previmage '$prev' \\\n # -i '$prevpattern' -t attr -n \"id\" -v \"Pattern$wikidata_identifier\" \\\n # -i '$prevpattern' -t attr -n \"width\" -v \"100%\" \\\n # -i '$prevpattern' -t attr -n \"height\" -v \"100%\" \\\n # -i '$prevpattern' -t attr -n \"patternContentUnits\" -v \"objectBoundingBox\" \\\n # -i '$prevpattern' -t attr -n \"preserveAspectRatio\" -v \"none\" \\\n # -i '$prevpattern' -t attr -n \"viewBox\" -v \"0 0 1 1\" \\\n # -i '$previmage' -t attr -n \"width\" -v \"1\" \\\n # -i '$previmage' -t attr -n \"height\" -v \"1\" \\\n # -i '$previmage' -t attr -n \"href\" -v \"$cdkdepict_url\" \\\n # -i '$previmage' -t attr -n \"preserveAspectRatio\" -v \"none\" \\\n # \"$svg_out\"\n # done\n #\n # sed -i '/<style.*>/{\n # r '\"$metabolite_patterns_css_f\"'\n # }' \"$path_out\"\n #\n # sed -i '/<g id=\"jic-defs\">/{\n # r /dev/stdin\n # }' \"$path_out\" < <(xmlstarlet sel -t -c '/svg/defs/*' \"$metabolite_patterns_svg_f\")\n # \"\"\"", "def add_digest(self, text):\n\n return '%s%s%s' % (text, self.HASHSEP, self.digest(text))", "def get(self, ext: str, 
as_thumb: bool, chrome: bool) -> tuple[str, bytes]:\n\n bext = ext.encode(\"ascii\", \"replace\")\n ext = bext.decode(\"utf-8\")\n zb = hashlib.sha1(bext).digest()[2:4]\n if PY2:\n zb = [ord(x) for x in zb]\n\n c1 = colorsys.hsv_to_rgb(zb[0] / 256.0, 1, 0.3)\n c2 = colorsys.hsv_to_rgb(zb[0] / 256.0, 1, 1)\n ci = [int(x * 255) for x in list(c1) + list(c2)]\n c = \"\".join([\"{:02x}\".format(x) for x in ci])\n\n w = 100\n h = 30\n if not self.args.th_no_crop and as_thumb:\n sw, sh = self.args.th_size.split(\"x\")\n h = int(100 / (float(sw) / float(sh)))\n w = 100\n\n if chrome:\n # cannot handle more than ~2000 unique SVGs\n if HAVE_PIL:\n # svg: 3s, cache: 6s, this: 8s\n from PIL import Image, ImageDraw\n\n h = int(64 * h / w)\n w = 64\n img = Image.new(\"RGB\", (w, h), \"#\" + c[:6])\n pb = ImageDraw.Draw(img)\n try:\n _, _, tw, th = pb.textbbox((0, 0), ext)\n except:\n tw, th = pb.textsize(ext)\n\n tw += len(ext)\n cw = tw // len(ext)\n x = ((w - tw) // 2) - (cw * 2) // 3\n fill = \"#\" + c[6:]\n for ch in ext:\n pb.text((x, (h - th) // 2), \" %s \" % (ch,), fill=fill)\n x += cw\n\n img = img.resize((w * 3, h * 3), Image.NEAREST)\n\n buf = BytesIO()\n img.save(buf, format=\"PNG\", compress_level=1)\n return \"image/png\", buf.getvalue()\n\n elif False:\n # 48s, too slow\n import pyvips\n\n h = int(192 * h / w)\n w = 192\n img = pyvips.Image.text(\n ext, width=w, height=h, dpi=192, align=pyvips.Align.CENTRE\n )\n img = img.ifthenelse(ci[3:], ci[:3], blend=True)\n # i = i.resize(3, kernel=pyvips.Kernel.NEAREST)\n buf = img.write_to_buffer(\".png[compression=1]\")\n return \"image/png\", buf\n\n svg = \"\"\"\\\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<svg version=\"1.1\" viewBox=\"0 0 100 {}\" xmlns=\"http://www.w3.org/2000/svg\"><g>\n<rect width=\"100%\" height=\"100%\" fill=\"#{}\" />\n<text x=\"50%\" y=\"50%\" dominant-baseline=\"middle\" text-anchor=\"middle\" xml:space=\"preserve\"\n fill=\"#{}\" font-family=\"monospace\" font-size=\"14px\" style=\"letter-spacing:.5px\">{}</text>\n</g></svg>\n\"\"\"\n svg = svg.format(h, c[:6], c[6:], ext)\n\n return \"image/svg+xml\", svg.encode(\"utf-8\")", "def svg_draw_quick(svg_img, board, pix_ref):\n RDK.Render(False)\n count = 0\n for path in svg_img:\n count = count + 1\n # use the pixel reference to set the path color, set pixel width and copy as a reference\n pix_ref.Recolor(path.fill_color)\n if PIXELS_AS_OBJECTS:\n pix_ref.Copy()\n np = path.nPoints()\n print('drawing path %i/%i' % (count, len(svg_img)))\n for i in range(np):\n p_i = path.getPoint(i)\n v_i = path.getVector(i)\n\n # Reorient the pixel object along the path\n pt_pose = point2D_2_pose(p_i, v_i)\n \n # add the pixel geometry to the drawing board object, at the calculated pixel pose\n if PIXELS_AS_OBJECTS:\n board.Paste().setPose(pt_pose)\n else:\n board.AddGeometry(pix_ref, pt_pose)\n \n RDK.Render(True)", "def save_graph(self, widget, data=None):\n\t\t#un po' di pulizia prima di fare il salvataggio\n\t\tos.system(\"find ./extra/MonitorGraph/ -type f -not -name '*.png' | xargs rm -f\")\n\t\tsnapshotFile =\"./extra/UserOutput/Snapshot\"+time.strftime(\"%Y%m%d-%H%M\", time.gmtime())+\".tar\"\n\t\tos.system(\"tar -cf \"+snapshotFile+\" --exclude def* --directory ./extra/ MonitorGraph/\")\n\t\tprint \"Snapshot saved to\",snapshotFile", "def write_svg(\n self,\n outfile,\n scaling=10,\n style=None,\n fontstyle=None,\n background=\"#222\",\n pad=\"5%\",\n precision=None,\n ):\n bb = self.get_bounding_box()\n if bb is None:\n return\n close = True\n if hasattr(outfile, 
\"__fspath__\"):\n outfile = open(outfile.__fspath__(), \"w\")\n elif isinstance(outfile, (basestring, Path)):\n outfile = open(outfile, \"w\")\n else:\n close = False\n if style is None:\n style = {}\n if fontstyle is None:\n fontstyle = {}\n bb *= scaling\n x = bb[0, 0]\n y = -bb[1, 1]\n w = bb[1, 0] - bb[0, 0]\n h = bb[1, 1] - bb[0, 1]\n if background is not None:\n if isinstance(pad, basestring):\n if pad[-1] == \"%\":\n pad = max(w, h) * float(pad[:-1]) / 100\n else:\n pad = float(pad)\n x -= pad\n y -= pad\n w += 2 * pad\n h += 2 * pad\n outfile.write(\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n width=\"{}\" height=\"{}\" viewBox=\"{} {} {} {}\">\n<defs>\n<style type=\"text/css\">\n\"\"\".format(\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n numpy.format_float_positional(x, trim=\"0\", precision=precision),\n numpy.format_float_positional(y, trim=\"0\", precision=precision),\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n )\n )\n ldkeys, ltkeys = self.get_svg_classes()\n for k in ldkeys:\n l, d = k\n if k in style:\n style_dict = style[k]\n else:\n c = \"rgb({}, {}, {})\".format(\n *[\n int(255 * c + 0.5)\n for c in colorsys.hsv_to_rgb(\n (l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,\n 1 - ((l + d) % 8) / 12.0,\n 1 - (d % 3) / 4.0,\n )\n ]\n )\n style_dict = {\"stroke\": c, \"fill\": c, \"fill-opacity\": \"0.5\"}\n outfile.write(\".l{}d{} {{\".format(l, d))\n outfile.write(\" \".join(\"{}: {};\".format(*x) for x in style_dict.items()))\n outfile.write(\"}\\n\")\n for k in ltkeys:\n l, t = k\n if k in fontstyle:\n style_dict = fontstyle[k]\n else:\n c = \"rgb({}, {}, {})\".format(\n *[\n int(255 * c + 0.5)\n for c in colorsys.hsv_to_rgb(\n (l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,\n 1 - ((l + t) % 8) / 12.0,\n 1 - (t % 3) / 4.0,\n )\n ]\n )\n style_dict = {\"stroke\": \"none\", \"fill\": c}\n outfile.write(\".l{}t{} {{\".format(l, t))\n outfile.write(\" \".join(\"{}: {};\".format(*x) for x in style_dict.items()))\n outfile.write(\"}\\n\")\n outfile.write(\"</style>\\n\")\n for cell in self.get_dependencies(True):\n cell.to_svg(outfile, scaling, precision, \"\")\n outfile.write(\"</defs>\\n\")\n if background is not None:\n outfile.write(\n '<rect x=\"{}\" y=\"{}\" width=\"{}\" height=\"{}\" fill=\"{}\" stroke=\"none\"/>\\n'.format(\n numpy.format_float_positional(x, trim=\"0\", precision=precision),\n numpy.format_float_positional(y, trim=\"0\", precision=precision),\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n background,\n )\n )\n self.to_svg(outfile, scaling, precision, 'transform=\"scale(1 -1)\"')\n outfile.write(\"</svg>\")\n if close:\n outfile.close()", "def lightenSvgFile(inFname, outFname=\"\"):\n if not outFname:\n # create outFname based on inFname\n if \"-screen\" in inFname:\n outFname = inFname.replace(\"-screen\", \"-print\")\n else:\n outFname = inFname.replace(\".svg\", \"\")+\"-print.svg\"\n if \"-dark\" in outFname:\n outFname = outFname.replace(\"-dark\", \"\")\n with open(inFname) as infile, open(outFname,\"w\") as outfile:\n l=infile.readline()\n while l:\n outfile.write(ligthenOneLine(l))\n l=infile.readline()\n return outFname", "def GraphSigVsSize(data, args):\n p = 
data[args]['sigsize']\n vers = sorted(p)\n sizes = sorted(p[vers[0]])\n for ver in vers:\n sigs = [p[ver][size]/size for size in sizes]\n plt.plot(sizes, sigs, label=ver)\n ax = plt.gca()\n ax.set_xlim(left=0, right=1024)\n #plt.xscale('log')\n #plt.yscale('log')\n saveplt('data/file-size-%s-%s.svg' % (args,'sig'), 'sigsize vs filesize for %s' % args,\n 'filesize', 'ratio', sizeticks)", "def _repr_svg_(self):\n if not IPythonConsole.ipython_useSVG:\n return None\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoSVG(\n mol, size, self.aix, \"\", keku, drawOptions=opts, highlightBonds=self.bix\n )", "def _repr_svg_(self):\n if not IPythonConsole.ipython_useSVG:\n return None\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoSVG(\n mol, size, self.aix, \"\", keku, drawOptions=opts, highlightBonds=self.bix\n )", "def put_drawing_svg(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request(HttpRequest, 'PUT', 'file')", "def wrapSVGatts(self,attkey,svglines):\n if not attkey: \n return svglines\n svgattribs= self.attkey_to_SVG_attribs(attkey)\n o= ('<g %s>\\n' % svgattribs) \n o+= svglines \n o+= ('</g> <!-- %s -->\\n' % svgattribs)\n return o", "def pack(self, data):\n for a, b in [(x, chr(ord(x) ^ 0x20)) for x in ['}','*','#','$']]:\n data = data.replace(a,'}%s' % b)\n crc = (sum(ord(c) for c in data) % 256) \n return \"$%s#%02X\" %(data, crc)", "def dvi_to_svg(dvi_file: str) -> str:\n file_type = get_tex_config()[\"intermediate_filetype\"]\n result = dvi_file.replace(\".\" + file_type, \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n \"\\\"{}\\\"\".format(dvi_file),\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n \"\\\"{}\\\"\".format(result),\n \">\",\n os.devnull\n ]\n os.system(\" \".join(commands))\n return result", "def svg(self, scale_factor=..., color=...): # -> str:\n ...", "def save_as_svg(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + file_name + '.svg')\n plt.close()", "def generate():\n content = request.json\n assert 'svg' in content\n\n bw = BaseWorkflow(content['svg'])\n alt_text = bw.execute()\n print(alt_text)\n\n return jsonify({'alt_text': alt_text})", "def construct_svg_smash_commands(files, ids, cmd_format, cmd_args):\n commands = []\n for f in files:\n if not f.startswith('Figure'):\n continue\n\n prefix, remainder = f.split('.', 1)\n\n try:\n id_, remainder = remainder.rsplit('_', 1)\n except:\n # GLOBAL SVG for each figure\n assert remainder == 'GLOBAL'\n continue\n\n # ignore svgs for non-AG points\n if id_ not in ids:\n continue\n\n args = cmd_args.copy()\n args['sample_id'] = id_\n args['prefix'] = prefix\n commands.append(cmd_format % args)\n return commands", "def convert(filename,\nRenderer: \"\"\"By default, the schematic is converted to an SVG file,\n written to the standard output. 
It may also be rendered using TK.\"\"\",\n):\n \n with open(filename, \"rb\") as file:\n objects = read(file)\n stat = os.stat(file.fileno())\n \n sheet = objects[1]\n assert sheet[\"RECORD\"] == Record.SHEET\n (sheetstyle, size) = {SheetStyle.A4: (\"A4\", (1150, 760)), SheetStyle.A3: (\"A3\", (1550, 1150)), SheetStyle.A: (\"A\", (950, 760))}[sheet.get(\"SHEETSTYLE\", SheetStyle.A4)]\n if \"USECUSTOMSHEET\" in sheet:\n size = tuple(int(sheet[\"CUSTOM\" + \"XY\"[x]]) for x in range(2))\n \n # Units are 1/100\" or 10 mils\n renderer = Renderer(size, \"in\", 1/100,\n margin=0.3, line=1, down=-1, textbottom=True)\n \n for n in range(int(sheet[\"FONTIDCOUNT\"])):\n n = format(1 + n)\n fontsize = int(sheet[\"SIZE\" + n]) * 0.875\n family = sheet[\"FONTNAME\" + n].decode(\"ascii\")\n kw = dict()\n italic = sheet.get(\"ITALIC\" + n)\n if italic:\n kw.update(italic=True)\n bold = sheet.get(\"BOLD\" + n)\n if bold:\n kw.update(bold=True)\n renderer.addfont(\"font\" + n, fontsize, family, **kw)\n renderer.setdefaultfont(\"font\" + sheet[\"SYSTEMFONT\"].decode(\"ascii\"))\n renderer.start()\n \n arrowhead = dict(base=5, shoulder=7, radius=3)\n arrowtail = dict(base=7, shoulder=0, radius=2.5)\n diamond = dict(base=10, shoulder=5, radius=2.5)\n \n pinmarkers = {\n PinElectrical.INPUT: arrowhead,\n PinElectrical.IO: diamond,\n PinElectrical.OUTPUT: arrowtail,\n PinElectrical.PASSIVE: None,\n PinElectrical.POWER: None,\n }\n \n def gnd(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n renderer.vline(-4, +4, offset=(13, 0), width=1.5)\n renderer.vline(-1, +1, offset=(16, 0), width=1.5)\n def rail(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n def arrowconn(renderer):\n renderer.hline(10, endarrow=arrowhead)\n def dchevron(renderer):\n renderer.hline(5)\n renderer.polyline(((8, +4), (5, 0), (8, -4)))\n renderer.polyline(((11, +4), (8, 0), (11, -4)))\n connmarkers = {\n PowerObjectStyle.ARROW: (arrowconn, 12),\n PowerObjectStyle.BAR: (rail, 12),\n PowerObjectStyle.GND: (gnd, 20),\n }\n \n def nc(renderer):\n renderer.line((+3, +3), (-3, -3), width=0.6)\n renderer.line((-3, +3), (+3, -3), width=0.6)\n renderer.addobjects((gnd, rail, arrowconn, dchevron, nc))\n \n with renderer.view(offset=(0, size[1])) as base:\n base.rectangle((size[0], -size[1]), width=0.6)\n base.rectangle((20, -20), (size[0] - 20, 20 - size[1]), width=0.6)\n for axis in range(2):\n for side in range(2):\n for n in range(4):\n translate = [None] * 2\n translate[axis] = size[axis] / 4 * (n + 0.5)\n translate[axis ^ 1] = 10\n if side:\n translate[axis ^ 1] += size[axis ^ 1] - 20\n translate[1] *= -1\n with base.view(offset=translate) as ref:\n label = chr(ord(\"1A\"[axis]) + n)\n ref.text(label, horiz=ref.CENTRE, vert=ref.CENTRE)\n if n + 1 < 4:\n x = size[axis] / 4 / 2\n if axis:\n ref.hline(-10, +10, offset=(0, -x),\n width=0.6)\n else:\n ref.vline(-10, +10, offset=(x, 0), width=0.6)\n \n if \"TITLEBLOCKON\" in sheet:\n if not os.path.isabs(filename):\n cwd = os.getcwd()\n pwd = os.getenv(\"PWD\")\n if os.path.samefile(pwd, cwd):\n cwd = pwd\n filename = os.path.join(pwd, filename)\n with base.view(offset=(size[0] - 20, 20 - size[1])) as block:\n points = ((-350, 0), (-350, 80), (-0, 80))\n block.polyline(points, width=0.6)\n block.hline(-350, 0, offset=(0, 50), width=0.6)\n block.vline(-30, offset=(-300, 50), width=0.6)\n block.vline(-30, offset=(-100, 50), width=0.6)\n block.hline(-350, 0, offset=(0, 20), width=0.6)\n block.hline(-350, 0, offset=(0, 10), 
width=0.6)\n block.vline(20, 0, offset=(-150, 0), width=0.6)\n \n block.text(\"Title\", (-345, 70))\n block.text(\"Size\", (-345, 40))\n block.text(sheetstyle, (-340, 30), vert=block.CENTRE)\n block.text(\"Number\", (-295, 40))\n block.text(\"Revision\", (-95, 40))\n block.text(\"Date\", (-345, 10))\n d = format(date.fromtimestamp(stat.st_mtime), \"%x\")\n block.text(d, (-300, 10))\n block.text(\"File\", (-345, 0))\n block.text(filename, (-300, 0))\n block.text(\"Sheet\", (-145, 10))\n block.text(\"of\", (-117, 10))\n block.text(\"Drawn By:\", (-145, 0))\n \n for obj in objects:\n if (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\"} and\n obj[\"RECORD\"] == Record.JUNCTION and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n col = colour(obj[\"COLOR\"])\n renderer.circle(2, location, fill=col)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"IOTYPE\", \"ALIGNMENT\"} == {\"RECORD\", \"OWNERPARTID\", \"STYLE\", \"WIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"AREACOLOR\", \"TEXTCOLOR\", \"NAME\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.PORT and obj[\"OWNERPARTID\"] == b\"-1\"):\n width = int(obj[\"WIDTH\"])\n if \"IOTYPE\" in obj:\n points = ((0, 0), (5, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (5, +5))\n else:\n points = ((0, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (0, +5))\n if (obj.get(\"ALIGNMENT\") == b\"2\") ^ (obj[\"STYLE\"] != b\"7\"):\n labelpoint = (10, 0)\n horiz = renderer.LEFT\n else:\n labelpoint = (width - 10, 0)\n horiz = renderer.RIGHT\n if obj[\"STYLE\"] == b\"7\":\n shapekw = dict(rotate=+90, offset=(0, +width))\n else:\n shapekw = dict()\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(offset=offset) as view:\n view.polygon(points,\n width=0.6,\n outline=colour(obj[\"COLOR\"]),\n fill=colour(obj[\"AREACOLOR\"]),\n **shapekw)\n \n with contextlib.ExitStack() as context:\n if obj[\"STYLE\"] == b\"7\":\n view = context.enter_context(view.view(rotate=+1))\n view.text(\n overline(obj[\"NAME\"]),\n colour=colour(obj[\"TEXTCOLOR\"]),\n offset=labelpoint,\n vert=view.CENTRE, horiz=horiz,\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} >= {\"RECORD\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\"} and\n obj[\"RECORD\"] == Record.WIRE and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"LINEWIDTH\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polyline(points, colour=colour(obj[\"COLOR\"]))\n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] in {b\"46\", b\"48\", b\"44\"} or\n obj.keys() - {\"USECOMPONENTLIBRARY\", \"DESCRIPTION\", \"DATAFILECOUNT\", \"MODELDATAFILEENTITY0\", \"MODELDATAFILEKIND0\", \"DATALINKSLOCKED\", \"DATABASEDATALINKSLOCKED\", \"ISCURRENT\", \"INDEXINSHEET\", \"INTEGRATEDMODEL\", \"DATABASEMODEL\"} == {\"RECORD\", \"OWNERINDEX\", \"MODELNAME\", \"MODELTYPE\"} and\n obj[\"RECORD\"] == b\"45\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj.get(\"USECOMPONENTLIBRARY\", b\"T\") == b\"T\" and obj[\"MODELTYPE\"] in {b\"PCBLIB\", b\"SI\", b\"SIM\", b\"PCB3DLib\"} and obj.get(\"DATAFILECOUNT\", b\"1\") == b\"1\" and obj.get(\"ISCURRENT\", b\"T\") == b\"T\" and obj.get(\"INTEGRATEDMODEL\", b\"T\") == b\"T\" and obj.get(\"DATABASEMODEL\", 
b\"T\") == b\"T\" and obj.get(\"DATALINKSLOCKED\", b\"T\") == b\"T\" and obj.get(\"DATABASEDATALINKSLOCKED\", b\"T\") == b\"T\" or\n obj.keys() >= {\"RECORD\", \"AREACOLOR\", \"BORDERON\", \"CUSTOMX\", \"CUSTOMY\", \"DISPLAY_UNIT\", \"FONTIDCOUNT\", \"FONTNAME1\", \"HOTSPOTGRIDON\", \"HOTSPOTGRIDSIZE\", \"ISBOC\", \"SHEETNUMBERSPACESIZE\", \"SIZE1\", \"SNAPGRIDON\", \"SNAPGRIDSIZE\", \"SYSTEMFONT\", \"USEMBCS\", \"VISIBLEGRIDON\", \"VISIBLEGRIDSIZE\"} and\n obj[\"RECORD\"] == Record.SHEET and obj[\"AREACOLOR\"] == b\"16317695\" and obj[\"BORDERON\"] == b\"T\" and obj.get(\"CUSTOMMARGINWIDTH\", b\"20\") == b\"20\" and obj.get(\"CUSTOMXZONES\", b\"6\") == b\"6\" and obj.get(\"CUSTOMYZONES\", b\"4\") == b\"4\" and obj[\"DISPLAY_UNIT\"] == b\"4\" and obj[\"FONTNAME1\"] == b\"Times New Roman\" and obj[\"HOTSPOTGRIDON\"] == b\"T\" and obj[\"ISBOC\"] == b\"T\" and obj[\"SHEETNUMBERSPACESIZE\"] == b\"4\" and obj[\"SIZE1\"] == b\"10\" and obj[\"SNAPGRIDON\"] == b\"T\" and obj[\"SYSTEMFONT\"] == b\"1\" and obj.get(\"TITLEBLOCKON\", b\"T\") == b\"T\" and obj[\"USEMBCS\"] == b\"T\" and obj[\"VISIBLEGRIDON\"] == b\"T\" and obj[\"VISIBLEGRIDSIZE\"] == b\"10\" or\n obj.keys() == {\"HEADER\", \"WEIGHT\"} and\n obj[\"HEADER\"] == b\"Protel for Windows - Schematic Capture Binary File Version 5.0\" or\n obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"DESIMP0\", \"DESIMPCOUNT\", \"DESINTF\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] == b\"47\" and obj[\"DESIMPCOUNT\"] == b\"1\" or\n obj.keys() == {\"RECORD\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"FILENAME\"} and\n obj[\"RECORD\"] == b\"39\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n pass\n \n elif (obj.keys() - {\"ISMIRRORED\", \"ORIENTATION\", \"INDEXINSHEET\", \"COMPONENTDESCRIPTION\", \"SHEETPARTFILENAME\", \"DESIGNITEMID\", \"DISPLAYMODE\", \"NOTUSEDBTABLENAME\", \"LIBRARYPATH\"} == {\"RECORD\", \"OWNERPARTID\", \"UNIQUEID\", \"AREACOLOR\", \"COLOR\", \"CURRENTPARTID\", \"DISPLAYMODECOUNT\", \"LIBREFERENCE\", \"LOCATION.X\", \"LOCATION.Y\", \"PARTCOUNT\", \"PARTIDLOCKED\", \"SOURCELIBRARYNAME\", \"TARGETFILENAME\"} and\n obj[\"RECORD\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"AREACOLOR\"] == b\"11599871\" and obj[\"COLOR\"] == b\"128\" and obj[\"PARTIDLOCKED\"] == b\"F\" and obj[\"TARGETFILENAME\"] == b\"*\"):\n pass\n \n elif (obj.keys() - {\"TEXT\", \"OWNERINDEX\", \"ISHIDDEN\", \"READONLYSTATE\", \"INDEXINSHEET\", \"UNIQUEID\", \"LOCATION.X\", \"LOCATION.X_FRAC\", \"LOCATION.Y\", \"LOCATION.Y_FRAC\", \"ORIENTATION\", \"ISMIRRORED\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"NAME\"} and\n obj[\"RECORD\"] == Record.PARAMETER and obj[\"OWNERPARTID\"] == b\"-1\"):\n if obj.get(\"ISHIDDEN\") != b\"T\" and obj.keys() >= {\"TEXT\", \"LOCATION.X\", \"LOCATION.Y\"}:\n orient = obj.get(\"ORIENTATION\")\n kw = {\n None: dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"1\": dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"2\": dict(vert=renderer.TOP, horiz=renderer.RIGHT),\n }[orient]\n if orient == b\"1\":\n kw.update(angle=+90)\n val = obj[\"TEXT\"]\n if val.startswith(b\"=\"):\n match = val[1:].lower()\n for o in objects:\n if o.get(\"RECORD\") != Record.PARAMETER or o.get(\"OWNERINDEX\") != obj[\"OWNERINDEX\"]:\n continue\n if o[\"NAME\"].lower() != match:\n continue\n val = o[\"TEXT\"]\n break\n else:\n raise LookupError(\"Parameter value for |OWNERINDEX={}|TEXT={}\".format(obj[\"OWNERINDEX\"].decode(\"ascii\"), obj[\"TEXT\"].decode(\"ascii\")))\n 
renderer.text(val.decode(\"ascii\"),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n **kw)\n else:\n text(renderer, obj, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ISMIRRORED\", \"LOCATION.X_FRAC\", \"LOCATION.Y_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\", \"NAME\", \"READONLYSTATE\"} and\n obj[\"RECORD\"] == Record.DESIGNATOR and obj[\"OWNERPARTID\"] == b\"-1\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"NAME\"] == b\"Designator\" and obj[\"READONLYSTATE\"] == b\"1\"):\n desig = obj[\"TEXT\"].decode(\"ascii\")\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if int(owner[\"PARTCOUNT\"]) > 2:\n desig += chr(ord(\"A\") + int(owner[\"CURRENTPARTID\"]) - 1)\n renderer.text(desig, (int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n colour=colour(obj[\"COLOR\"]),\n font=\"font\" + obj[\"FONTID\"].decode(),\n )\n \n elif (obj.keys() >= {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"LOCATIONCOUNT\", \"X1\", \"X2\", \"Y1\", \"Y2\"} and\n obj[\"RECORD\"] == Record.POLYLINE and obj.get(\"ISNOTACCESIBLE\", b\"T\") == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\"):\n if obj[\"OWNERPARTID\"] == b\"-1\":\n current = True\n else:\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n current = (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\"))\n if current:\n polyline(renderer, obj)\n \n elif (obj.keys() - {\"OWNERPARTDISPLAYMODE\", \"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"COLOR\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\"} and\n obj[\"RECORD\"] == Record.LINE and obj[\"ISNOTACCESIBLE\"] == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n renderer.line(\n colour=colour(obj[\"COLOR\"]),\n width=int(obj[\"LINEWIDTH\"]),\n a=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n b=(int(obj[\"CORNER.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"NAME\", \"SWAPIDPIN\", \"OWNERPARTDISPLAYMODE\", \"ELECTRICAL\", \"DESCRIPTION\", \"SWAPIDPART\", \"SYMBOL_OUTEREDGE\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"DESIGNATOR\", \"FORMALTYPE\", \"LOCATION.X\", \"LOCATION.Y\", \"PINCONGLOMERATE\", \"PINLENGTH\"} and\n obj[\"RECORD\"] == Record.PIN and obj[\"FORMALTYPE\"] == b\"1\"):\n if obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n pinlength = int(obj[\"PINLENGTH\"])\n pinconglomerate = int(obj[\"PINCONGLOMERATE\"])\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n rotate = pinconglomerate & 3\n with renderer.view(offset=offset, rotate=rotate) as view:\n kw = dict()\n points = list()\n if \"SYMBOL_OUTEREDGE\" in obj:\n view.circle(2.85, (3.15, 0), width=0.6)\n points.append(6)\n points.append(pinlength)\n electrical = obj.get(\"ELECTRICAL\", PinElectrical.INPUT)\n marker = pinmarkers[electrical]\n if marker:\n kw.update(startarrow=marker)\n view.hline(*points, **kw)\n \n if pinconglomerate >> 1 & 1:\n invert = -1\n kw = dict(angle=180)\n else:\n invert = +1\n kw = dict()\n if pinconglomerate & 8 and \"NAME\" in obj:\n view.text(overline(obj[\"NAME\"]),\n vert=view.CENTRE,\n horiz=view.RIGHT * invert,\n offset=(-7, 0),\n **kw)\n if pinconglomerate & 
16:\n designator = obj[\"DESIGNATOR\"].decode(\"ascii\")\n view.text(designator,\n horiz=view.LEFT * invert,\n offset=(+9, 0),\n **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ORIENTATION\", \"STYLE\", \"ISCROSSSHEETCONNECTOR\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"SHOWNETNAME\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.POWER_OBJECT and obj[\"OWNERPARTID\"] == b\"-1\"):\n orient = obj.get(\"ORIENTATION\")\n if obj.get(\"ISCROSSSHEETCONNECTOR\") == b\"T\":\n marker = dchevron\n offset = 14\n else:\n (marker, offset) = connmarkers.get(obj[\"STYLE\"], (None, 0))\n \n col = colour(obj[\"COLOR\"])\n translate = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(colour=col, offset=translate) as view:\n kw = dict()\n if orient:\n kw.update(rotate=int(orient))\n view.draw(marker, **kw)\n \n if obj[\"SHOWNETNAME\"] != b\"F\":\n orients = {\n b\"2\": (renderer.RIGHT, renderer.CENTRE, (-1, 0)),\n b\"3\": (renderer.CENTRE, renderer.TOP, (0, -1)),\n None: (renderer.LEFT, renderer.CENTRE, (+1, 0)),\n b\"1\": (renderer.CENTRE, renderer.BOTTOM, (0, +1)),\n }\n (horiz, vert, pos) = orients[orient]\n t = obj[\"TEXT\"].decode(\"ascii\")\n pos = (p * offset for p in pos)\n view.text(t, pos, horiz=horiz, vert=vert)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"ISSOLID\", \"LINEWIDTH\", \"CORNERXRADIUS\", \"CORNERYRADIUS\", \"TRANSPARENT\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"AREACOLOR\", \"COLOR\", \"CORNER.X\", \"CORNER.Y\", \"ISNOTACCESIBLE\", \"LOCATION.X\", \"LOCATION.Y\"} and\n obj[\"RECORD\"] in {Record.RECTANGLE, Record.ROUND_RECTANGLE} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"ISSOLID\", b\"T\") == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n kw = dict(width=0.6, outline=colour(obj[\"COLOR\"]))\n if \"ISSOLID\" in obj:\n kw.update(fill=colour(obj[\"AREACOLOR\"]))\n a = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n b = (int(obj[\"CORNER.\" + x]) for x in \"XY\")\n \n if obj[\"RECORD\"] == Record.ROUND_RECTANGLE:\n r = list()\n for x in \"XY\":\n radius = obj.get(\"CORNER{}RADIUS\".format(x))\n if radius is None:\n radius = 0\n else:\n radius = int(radius)\n r.append(int(radius))\n renderer.roundrect(r, a, b, **kw)\n else:\n renderer.rectangle(a, b, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.NET_LABEL and obj[\"OWNERPARTID\"] == b\"-1\"):\n renderer.text(overline(obj[\"TEXT\"]),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"STARTANGLE\", \"SECONDARYRADIUS\"} == {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"COLOR\", \"ENDANGLE\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\"} and\n obj[\"RECORD\"] in {Record.ARC, Record.ELLIPTICAL_ARC} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"LINEWIDTH\"] == b\"1\" and obj.get(\"OWNERPARTDISPLAYMODE\", b\"1\") == b\"1\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (owner[\"CURRENTPARTID\"] == obj[\"OWNERPARTID\"] and\n owner.get(\"DISPLAYMODE\", b\"0\") == obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\")):\n r = int(obj[\"RADIUS\"])\n if obj[\"RECORD\"] == 
Record.ELLIPTICAL_ARC:\n r2 = obj.get(\"SECONDARYRADIUS\")\n if r2 is None:\n r2 = 0\n else:\n r2 = int(r2)\n else:\n r2 = r\n \n start = float(obj.get(\"STARTANGLE\", 0))\n end = float(obj[\"ENDANGLE\"])\n centre = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.arc((r, r2), start, end, centre,\n colour=colour(obj[\"COLOR\"]),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"LINEWIDTH\"} > {\"RECORD\", \"AREACOLOR\", \"COLOR\", \"ISNOTACCESIBLE\", \"ISSOLID\", \"LOCATIONCOUNT\", \"OWNERINDEX\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == Record.POLYGON and obj[\"AREACOLOR\"] == b\"16711680\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\" and obj[\"OWNERPARTID\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polygon(fill=colour(obj[\"COLOR\"]), points=points)\n elif (obj.keys() - {\"INDEXINSHEET\", \"ISNOTACCESIBLE\", \"OWNERINDEX\", \"ORIENTATION\", \"JUSTIFICATION\", \"COLOR\"} == {\"RECORD\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.LABEL):\n if obj[\"OWNERPARTID\"] == b\"-1\" or obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n text(renderer, obj)\n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == b\"22\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n col = colour(obj[\"COLOR\"])\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.draw(nc, location, colour=col)\n elif (obj.keys() - {\"CLIPTORECT\"} == {\"RECORD\", \"ALIGNMENT\", \"AREACOLOR\", \"CORNER.X\", \"CORNER.Y\", \"FONTID\", \"ISSOLID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"Text\", \"WORDWRAP\"} and\n obj[\"RECORD\"] == b\"28\" and obj[\"ALIGNMENT\"] == b\"1\" and obj[\"AREACOLOR\"] == b\"16777215\" and obj.get(\"CLIPTORECT\", b\"T\") == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"WORDWRAP\"] == b\"T\"):\n lhs = int(obj[\"LOCATION.X\"])\n renderer.text(\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n offset=(lhs, int(obj[\"CORNER.Y\"])),\n width=int(obj[\"CORNER.X\"]) - lhs,\n text=obj[\"Text\"].decode(\"ascii\").replace(\"~1\", \"\\n\"),\n vert=renderer.TOP,\n )\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\", \"X3\", \"Y3\", \"X4\", \"Y4\"} and\n obj[\"RECORD\"] == Record.BEZIER and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"1\" and obj[\"LINEWIDTH\"] == b\"1\" and obj[\"LOCATIONCOUNT\"] == b\"4\"):\n col = colour(obj[\"COLOR\"])\n points = list()\n for n in range(4):\n n = format(1 + n)\n points.append(tuple(int(obj[x + n]) for x in \"XY\"))\n renderer.cubicbezier(*points, colour=col)\n \n elif (obj.keys() - {\"RADIUS_FRAC\", \"SECONDARYRADIUS_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\", \"SECONDARYRADIUS\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\"} and\n obj[\"RECORD\"] == Record.ELLIPSE and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"RADIUS_FRAC\", b\"94381\") == b\"94381\" and obj[\"SECONDARYRADIUS\"] == obj[\"RADIUS\"] and obj.get(\"SECONDARYRADIUS_FRAC\", b\"22993\") == b\"22993\" and obj[\"ISSOLID\"] == b\"T\"):\n renderer.circle(\n 
r=int(obj[\"RADIUS\"]),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"SYMBOLTYPE\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"XSIZE\", \"YSIZE\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.SHEET_SYMBOL and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"SYMBOLTYPE\", b\"Normal\") == b\"Normal\"):\n renderer.rectangle((int(obj[\"XSIZE\"]), -int(obj[\"YSIZE\"])),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\"} and\n obj[\"RECORD\"] in {Record.SHEET_NAME, Record.SHEET_FILE_NAME} and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n text(renderer, obj)\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"INDEXINSHEET\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\", \"EMBEDIMAGE\", \"FILENAME\"} and\n obj[\"RECORD\"] == Record.IMAGE and obj[\"OWNERINDEX\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"EMBEDIMAGE\"] == b\"T\" and obj[\"FILENAME\"] == b\"newAltmLogo.bmp\"):\n location = list()\n corner = list()\n for x in \"XY\":\n location.append(int(obj[\"LOCATION.\" + x]))\n corner.append(int(obj[\"CORNER.\" + x]))\n renderer.rectangle(location, corner, width=0.6)\n \n else:\n print(\"\".join(\"|{}={!r}\".format(p, v) for (p, v) in sorted(obj.items())), file=stderr)\n \n renderer.finish()", "def GraphSigVsVers(data, args):\n p = data[args]['sigsize']\n vers = sorted(v for v in p if 'S1' not in v)\n #sizes = sorted(p[vers[0]])\n for size in sizeticks:\n sigs = [p[ver][size]/size for ver in vers]\n plt.plot(vers, sigs, label=\"%sM\" % size)\n #plt.xscale('log')\n #plt.yscale('log')\n saveplt('data/file-vers-%s-%s.svg' % (args,'sig'), 'sigsize vs version for %s' % args,\n 'version', 'ratio', vers)", "def wrap_in_html(self,svgofmodel):\n html= '''<html>\\n%s\\n%s\\n%s\\n</g></g></g></svg></body></html>\\n'''\n svgbody= '''<body onload=\"javascript:setTimeout(&quot;location.reload(true);&quot;,%d);\">\\n''' % self.vrefreshms\n svgbody += \"<h4>GeoGad</h4>\"\n svghead= '<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.2\" baseProfile=\"tiny\" width=\"%dpx\" height=\"%dpx\">\\n'\n svghead= svghead % (self.vboxX,self.vboxY)\n svghead+= '<rect x=\"1\" y=\"1\" width=\"%d\" height=\"%d\" fill=\"none\" stroke=\"blue\" stroke-width=\"4\"/>\\n'% (self.vboxX,self.vboxY)\n svghead+= '<g fill=\"none\" stroke=\"black\" stroke-width=\"%0.2f\">\\n' % self.vlinewidth\n svghead+= '<g transform=\"scale(%0.2f,%0.2f)\">\\n' % (self.vscaleX,self.vscaleY)\n svghead+= '<g transform=\"translate(%0.2f,%0.2f)\">\\n' % (self.vtranX,self.vtranY)\n return html % (svgbody,svghead,svgofmodel)", "def generate_svg(self, item, type_, filename, locale):\n\n old_locale = item.session_manager.current_locale\n item.session_manager.current_locale = locale\n\n chart = None\n if type_ == 'candidates':\n chart = self.renderer.get_candidates_chart(item, 'svg')\n if type_ == 'connections':\n chart = self.renderer.get_connections_chart(item, 'svg')\n if type_ == 'list-groups':\n chart = self.renderer.get_list_groups_chart(item, 'svg')\n if type_ == 'lists':\n chart = 
self.renderer.get_lists_chart(item, 'svg')\n if type_ == 'lists-panachage':\n chart = self.renderer.get_lists_panachage_chart(item, 'svg')\n if type_ == 'seat-allocation':\n chart = self.renderer.get_seat_allocation_chart(item, 'svg')\n if type_ == 'party-strengths':\n chart = self.renderer.get_party_strengths_chart(item, 'svg')\n if type_ == 'parties-panachage':\n chart = self.renderer.get_parties_panachage_chart(item, 'svg')\n if type_ == 'entities-map':\n chart = self.renderer.get_entities_map(item, 'svg', locale)\n if type_ == 'districts-map':\n chart = self.renderer.get_districts_map(item, 'svg', locale)\n\n item.session_manager.current_locale = old_locale\n\n if chart:\n path = '{}/{}'.format(self.svg_dir, filename)\n with self.app.filestorage.open(path, 'w') as f:\n copyfileobj(chart, f)\n log.info(\"{} created\".format(filename))\n return 1\n\n return 0", "def _get_checksum(self, arg):", "def _dessiner_grille_svg(pfichier):\n global grille\n if not grille:\n # protege des utilisations non autorisees\n # (avant l'appel de sauver_svg())\n return\n x = grille.origine[0]\n y = grille.origine[1]\n d = grille.definition\n taille_x = grille.taille_x\n taille_y = grille.taille_y\n # c'est en realite la meme methode que dans grille.dessinegrille()\n for i in range(taille_y+1):\n pfichier.write(\n \"<line x1=\\\"\" + str(x) + \"\\\" \"\n + \"x2=\\\"\" + str(x+d*taille_y) + \"\\\" \"\n + \"y1=\\\"\" + str(y) + \"\\\" \"\n + \"y2=\\\"\" + str(y+(d*taille_y/2)) + \"\\\"\")\n x -= d\n y += d/2\n pfichier.write(\" stroke=\\\"grey\\\"\")\n pfichier.write(\"/>\\n\")\n x = grille.origine[0]\n y = grille.origine[1]\n for j in range(taille_x+1):\n pfichier.write(\n \"<line x1=\\\"\" + str(x) + \"\\\" \"\n + \"x2=\\\"\" + str(x-d*taille_y) + \"\\\" \"\n + \"y1=\\\"\" + str(y) + \"\\\" \"\n + \"y2=\\\"\" + str(y+(d*taille_y/2)) + \"\\\"\")\n x += d\n y += d/2\n pfichier.write(\" stroke=\\\"grey\\\"\")\n pfichier.write(\"/>\\n\")", "def draw(self, output_file):\n self.calc_width()\n self.calc_height()\n\n surface = cairo.SVGSurface(output_file, self.width, self.height)\n ctx = cairo.Context(surface)\n\n ## change background color\n ctx.rectangle(0, 0, self.width, self.height)\n ctx.set_source_rgb(1, 1, 1)\n ctx.fill()\n\n ## Variables\n line_spacing = 125\n line_depth = 125\n header_depth = 75\n left_spacing = 35\n\n ## Create custom color palette\n color_palette = [[],[],[]]\n num_colors_per = self.number_of_motifs//3\n max_num_colors_per = self.number_of_motifs - (2 * num_colors_per)\n gradient = 1/num_colors_per\n max_gradient = 1/max_num_colors_per\n # color_gradient_value = \n for i in range(3):\n if i == 2:\n for k in range(1,max_num_colors_per + 1):\n color_palette[i].append(k*max_gradient)\n else:\n for k in range(1,num_colors_per + 1):\n color_palette[i].append(k*gradient)\n # print(max_num_colors_per)\n # print(color_palette)\n\n\n ## Legend\n x_legend = self.width - self.width_of_legend\n y_legend = 75\n legend_width = 145\n legend_height = (self.number_of_motifs * 15) + 8\n ctx.rectangle(x_legend,y_legend,legend_width,legend_height)\n ctx.set_source_rgb(0,0,0)\n ctx.stroke()\n legend_line_length = 35\n count = 1\n for i in range(3):\n for j in range(len(color_palette[i])):\n ctx.move_to(x_legend + 5, y_legend + (count*15))\n ctx.line_to(x_legend + legend_line_length, y_legend + (count*15))\n if i == 0:\n ctx.set_source_rgb(color_palette[i][j],0,0)\n if i == 1:\n ctx.set_source_rgb(0,color_palette[i][j],0)\n if i == 2:\n ctx.set_source_rgb(0,0,color_palette[i][j])\n ctx.set_line_width(3)\n 
ctx.stroke()\n\n ctx.move_to((x_legend + legend_line_length) + 10, y_legend + (count*15))\n ctx.set_font_size(11)\n ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n ctx.set_source_rgb(0,0,0)\n ctx.show_text(self.list_of_motifs[count-1])\n\n count += 1\n\n for i in range(len(self.list_of_motif_objects)):\n current_motif_obj = self.list_of_motif_objects[i]\n current_length_of_seq = len(current_motif_obj.sequence)\n current_motif_coords = current_motif_obj.motif_coordinates\n current_motif_sequences = current_motif_obj.motif_sequences\n current_exon_coords = current_motif_obj.exon_coordinates\n\n width_left = self.width - current_length_of_seq - self.width_of_legend\n \n ## Draw main sequence line\n ctx.move_to(left_spacing,(i*line_spacing) + line_depth) \n ctx.line_to(left_spacing + current_length_of_seq,(i*line_spacing) + line_depth)\n ctx.set_source_rgb(0,0,0)\n ctx.set_line_width(2)\n ctx.stroke()\n\n ## Draw the exon\n x1 = left_spacing + current_exon_coords[0][0]\n y1 = (i*line_spacing) + line_depth - 20\n rec_width = current_exon_coords[0][1] - current_exon_coords[0][0]\n rec_height = 40\n ctx.rectangle(x1,y1,rec_width,rec_height)\n ctx.set_source_rgb(0,0,0)\n ctx.stroke()\n\n ## Loop to draw all motifs\n for j in range(len(current_motif_coords)):\n ctx.move_to(left_spacing + current_motif_coords[j][0],(i*line_spacing) + line_depth) \n ctx.line_to(left_spacing + current_motif_coords[j][0] + 2,(i*line_spacing) + line_depth)\n motif_num = current_motif_coords[j][2]\n if(motif_num < num_colors_per):\n ctx.set_source_rgb(color_palette[0][motif_num],0,0)\n if(motif_num >= num_colors_per and motif_num < (2*num_colors_per)):\n ctx.set_source_rgb(0,color_palette[1][motif_num-num_colors_per],0)\n if(motif_num >= (2*num_colors_per)):\n ctx.set_source_rgb(0,0,color_palette[2][motif_num-(2*num_colors_per)])\n ctx.set_line_width(15)\n ctx.stroke()\n\n ## adding header text\n ctx.move_to(left_spacing, (i*line_spacing) + header_depth)\n ctx.set_font_size(17)\n ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n ctx.set_source_rgb(0,0,0)\n ctx.show_text(current_motif_obj.header)\n\n # ## adding sequence text (MAYBE MAKE THIS OPTIONAL FLAG?)\n # disp_length = 80\n # last_k = 0\n # for k in range(len(current_motif_obj.sequence)//disp_length):\n # current_seq = current_motif_obj.sequence[k*disp_length:(k*disp_length)+disp_length]\n # ctx.move_to(50, (i*512) + 125 + (25*k))\n # ctx.set_font_size(14)\n # ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n # ctx.set_source_rgb(0,0,0)\n # ctx.show_text(current_seq)\n # last_k = k\n # final_num = ((len(current_motif_obj.sequence)//disp_length)*disp_length)\n # the_rest = current_motif_obj.sequence[final_num:]\n # ctx.move_to(50, (i*512) + 125 + (25*(last_k + 1)))\n # ctx.set_font_size(14)\n # ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n # ctx.set_source_rgb(0,0,0)\n # ctx.show_text(the_rest)\n\n\n\n surface.finish()", "def export(self):\n def get_export_cmd(svgfile, fmt, dpi, outfile):\n if _use_rsvg and os.name == 'posix':\n # A DPI of 72 must be set to convert from files generated with\n # Inkscape v1+ to get the correct page size.\n ret = os.system('rsvg-convert --version 1>/dev/null')\n if ret == 0:\n return ('rsvg-convert' +\n ' --dpi-x=' + str(dpi * 72.0 / 96.0) +\n ' --dpi-y=' + str(dpi * 72.0 / 96.0) +\n ' --format=' + fmt +\n ' --output=\"' + outfile + '\"' +\n ' \"' + svgfile + '\"')\n else:\n return ('inkscape '\n + 
'--export-dpi=' + str(dpi) + ' '\n + '--export-type=' + fmt + ' '\n + '--export-filename=\"' + outfile + '\" '\n '\"' + svgfile + '\"')\n\n for line, svgfile in self.svgouts.iteritems():\n d = self.get_line_desc(line)\n outfile = self.get_output(d)\n if self.options.format == 'jpg':\n # TODO: output a jpg file\n self.options.format = 'png'\n outfile = outfile.replace('jpg', 'png')\n if self.options.format == 'svg':\n try:\n shutil.move(svgfile, outfile)\n except OSError:\n errormsg(_('Cannot create \"' + outfile + '\"'))\n else:\n cmd = get_export_cmd(svgfile,\n self.options.format,\n self.options.dpi, outfile)\n os.system(cmd)", "def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = md5(header).hexdigest()\n return suma_md5", "def ps2svg_string(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n path_style = \"fill:none;stroke:#000000;stroke-width:16;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1\"\n point_style = \"font-variant:normal;font-weight:normal;font-size:13.39669991px;font-family:Times;-inkscape-font-specification:Times-Roman;writing-mode:lr-tb;fill:#0000FF;fill-opacity:1;fill-rule:nonzero;stroke:none\"\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntro)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n point_info = []\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 10)\n # Convert into path line\n sPathLine = '<path id=\"path{}\" style=\"{}\" d=\"M {},{} {},{}\" />'.format(\n idx, path_style, nums[0], nums[1], nums[2], nums[3])\n idx += 2\n lst_out.append(sPathLine)\n else:\n # We have exited the lines section\n section = \"point\"\n lst_out.append('<g transform=\"scale(10)\" id=\"g{}\">'.format(idx))\n idx += 2\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n\n # Is this the first point?\n if bFirstPoint:\n lst_out.append('<text id=\"text{}\" style=\"{}\" transform=\"matrix(1,0,0,-1,{},{})\">'.format(\n idx, point_style, nums[0], nums[1]))\n idx += 2\n oorsprong['x'] = float(nums[0])\n oorsprong['y'] = float(nums[1])\n bFirstPoint = False\n\n # In all situations: position w.r.t. 
oorsprong\n pos_x = \"{:.6f}\".format(float(nums[0]) - oorsprong['x']) \n pos_y = \"{:.6f}\".format(oorsprong['y'] - float(nums[1]) )\n point_info.append(pos_y)\n point_info.append(pos_x)\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n point_info.append(sLabel)\n\n # Output this label\n sLabel = '<tspan id=\"tspan{}\" y=\"{}\" x=\"{}\">{}</tspan>'.format(\n idx, pos_y, pos_x, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n point_info = []\n\n # Finish up the svg nicely\n lst_out.append(\" </text>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack", "def complete_gst(gst_in, gga_in):\n\n gst_out = gst_in.split(',')\n# gga_in = gga_in.split(',')\n# hdop = np.float(gga_in[8])\n# hdop = 0.05\n q=1\n gst_out[2] = str(0.006*q)\n gst_out[3] = str(float(gst_out[6])*q)\n gst_out[4] = str(float(gst_out[7])*q)\n gst_out[6] = str(float(gst_out[6])*q)\n gst_out[7] = str(float(gst_out[7])*q)\n gst_out[5] = str(270.0)\n gst = ','.join(gst_out)\n \n # Apply new checksum :\n gst = gst[:-4] + checksum(gst) + gst[-2:]\n\n return gst", "def merge_svg_files(svg_file1, svg_file2, x_coord, y_coord, scale=1):\n svg1 = _check_svg_file(svg_file1)\n svg2 = _check_svg_file(svg_file2)\n\n svg2_root = svg2.getroot()\n svg1.append([svg2_root])\n\n svg2_root.moveto(x_coord, y_coord, scale=scale)\n\n return svg1", "def __make_svg(self):\n if not self._items:\n return None\n\n # define call back functions for node format, href, subgraph\n def fnc_node_format(n):\n if (n.type, n.output_name, n.task_name, n.shard_idx) in self._items:\n return self._items[(n.type, n.output_name, n.task_name, n.shard_idx)][0]\n else:\n return None\n\n def fnc_href(n):\n if (n.type, n.output_name, n.task_name, n.shard_idx) in self._items:\n return self._items[(n.type, n.output_name, n.task_name, n.shard_idx)][1]\n else:\n return None\n\n def fnc_subgraph(n):\n if (n.type, n.output_name, n.task_name, n.shard_idx) in self._items:\n return self._items[(n.type, n.output_name, n.task_name, n.shard_idx)][2]\n else:\n return None\n\n # convert to dot string\n dot_str = self._dag.to_dot(\n fnc_node_format=fnc_node_format,\n fnc_href=fnc_href,\n fnc_subgraph=fnc_subgraph,\n template=self._template_d,\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # temporary dot, svg from graphviz.Source.render\n tmp_dot = os.path.join(tmp_dir, '_tmp_.dot')\n\n try:\n svg = Source(dot_str, format='svg').render(filename=tmp_dot)\n except (ExecutableNotFound, FileNotFoundError):\n logger.error(\n 'Importing graphviz failed. Task graph will not be available. '\n 'Check if you have installed graphviz correctly so that '\n '\"dot\" executable exists on your PATH. '\n '\"pip install graphviz\" does not install such \"dot\". '\n 'Use apt or system-level installer instead. '\n 'e.g. 
sudo apt-get install graphviz.'\n )\n return None\n\n # save to DOT\n uri_dot = os.path.join(\n self._out_dir,\n CrooHtmlReportTaskGraph.TASK_GRAPH_DOT.format(\n workflow_id=self._workflow_id\n ),\n )\n AutoURI(uri_dot).write(dot_str, no_lock=True)\n\n # save to SVG\n uri_svg = os.path.join(\n self._out_dir,\n CrooHtmlReportTaskGraph.TASK_GRAPH_SVG.format(\n workflow_id=self._workflow_id\n ),\n )\n svg_contents = AutoURI(svg).read()\n AutoURI(uri_svg).write(svg_contents, no_lock=True)\n\n return svg_contents", "def make_checksum_file(self, project):\n return None", "def test_checksum(size1, size2, lines, tmpdir):\n fp = tmpdir.join(\"temp-data.txt\").strpath\n data = \"\\n\".join(lines)\n with open(fp, 'w') as f:\n f.write(data)\n exp = hashlib.new(\"md5\", data.encode(\"utf-8\")).hexdigest()\n res1 = checksum(fp, size1)\n res2 = checksum(fp, size2)\n assert exp == res1\n assert res1 == res2\n assert res2 == exp", "def svg(self) -> str:\n return SPOUSE_LINK_TEMPLATE.format(**self._fields)", "def update_png_crusher():\n if os.path.exists(PNG_CRUSHER):\n return\n\n for path in glob.glob(os.path.join(libdot.BIN_DIR, '.png.crusher.*')):\n os.unlink(path)\n\n r = requests.get(PNG_CRUSHER_URL + '?format=TEXT')\n with open(PNG_CRUSHER, 'wb') as fp:\n fp.write(base64.b64decode(r.text))\n\n os.chmod(PNG_CRUSHER, 0o755)", "def prepare_svg(svg_tag, color_rgb, img_dims, canvas_dims):\n # fill sketches\n svg_tag = unescape(svg_tag)\n\n # color_str = f\"rgb({color_rgb[0]},{color_rgb[1]},{color_rgb[2]})\"\n svg_tag = svg_tag.replace(\"fill: rgb(0,0,0);\", f\"fill: {color_rgb};\")\n svg_tag = svg_tag.replace(\"stroke: rgb(255,255,255);\", f\"stroke: {color_rgb};\")\n svg_tag = svg_tag.replace(\"fill-opacity: 0;\", \"fill-opacity: 1;\")\n # todo adjust transform\n scale_x = img_dims[\"width\"] / canvas_dims[\"width\"]\n scale_y = img_dims[\"height\"] / canvas_dims[\"height\"]\n svg_tag = svg_tag.replace('transform=\"', f'transform=\" scale({scale_x} {scale_y}) ')\n\n # if float(scale_x) != 1.0:\n # print(svg_tag)\n # print(f\"scale {scale_x},{scale_y}\")\n # utils.exit(\"early quit\")\n\n return svg_tag", "def save_plot(p, file_name, path='../static/images/'):\n p.output_backend = \"svg\"\n export_svgs(p, filename=path + file_name + '.svg')", "def test_base(self):\n output_filename = get_resource_filename(\"rendered_simple_latex.svg\")\n with open(output_filename, \"r\", encoding=\"utf-8\") as expected_output:\n self.assertSvgEquals(\n render_latex_to_image(r\"I = \\int \\rho R^{2} dV\"), expected_output.read()\n )", "def rsvg_export(input_file, output_file, dpi=90, rsvg_binpath=None):\n if not os.path.exists(input_file):\n log.error('File {} not found.'.format(input_file))\n raise IOError((0, 'File not found.', input_file))\n\n if rsvg_binpath is None:\n rsvg_binpath = which('rsvg-convert')\n check_command(rsvg_binpath)\n\n args_strings = []\n args_strings += [\"-f pdf\"]\n args_strings += [\"-o {}\".format(output_file)]\n args_strings += [\"--dpi-x {}\".format(dpi)]\n args_strings += [\"--dpi-y {}\".format(dpi)]\n args_strings += [input_file]\n\n return call_command(rsvg_binpath, args_strings)", "def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()", "def to_svg(self, separate=False, include_junctions=False):\n serialize_as_svg(self.output, separate, include_junctions)", "def addChecksum(s):\n if len(s) < 1:\n raise ValueError, \"The provided string needs to be atleast 1 byte long\"\n return (_calcChecksum(s) + s)", "def _asset_hash(path: str) -> str:\n full_path = THEME_PATH 
/ \"static\" / path\n digest = hashlib.sha1(full_path.read_bytes()).hexdigest()\n\n return f\"_static/{path}?digest={digest}\"", "def to_svg(self, outfile, scaling, precision, attributes):\n outfile.write('<g id=\"')\n outfile.write(self.name.replace(\"#\", \"_\"))\n outfile.write('\" ')\n outfile.write(attributes)\n outfile.write(\">\\n\")\n for polygon in self.polygons:\n polygon.to_svg(outfile, scaling, precision)\n for path in self.paths:\n path.to_svg(outfile, scaling, precision)\n for label in self.labels:\n label.to_svg(outfile, scaling, precision)\n for reference in self.references:\n reference.to_svg(outfile, scaling, precision)\n outfile.write(\"</g>\\n\")", "def hash_raw_png(path: str, hashobj: Hashobj) -> None:\n\n def filter_chunks(ct: bytes) -> bool:\n return ct in image_chunks\n\n for length, chunk_type, chunk, crc in iter_png(path, translate=False, verify_crc=False):\n if filter_chunks(chunk_type):\n hashobj.update(length)\n hashobj.update(chunk_type)\n hashobj.update(chunk)", "def add_icon_name_from_file(self, icon_name, filename, size=None):\n try:# TODO: Make svg actually recognized\n pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)\n self.add_icon_name_from_pixbuf(icon_name, pixbuf, size)\n except Exception as e:\n print \"exception in icons.py IconManager.add_icon_name_from_file\"\n print e\n # Happens if, e.g., librsvg is not installed.", "def dump_graph(self) -> str:\n graph_dot_file = f'{self._name}.dot'\n graph_diagram_file = f'{self._name}.svg'\n write_dot(self._graph, graph_dot_file)\n subprocess.check_output(\n shlex.split(f'dot -Tsvg {graph_dot_file} -o {graph_diagram_file}')\n )\n return graph_diagram_file", "def main():\n parser = argparse.ArgumentParser(\n \"Read from graph in algviz JSON format and write SVG using PyGraphViz\")\n parser.add_argument(\"infile\", type=argparse.FileType(\"r\"),\n help=\"input file. - for stdin\")\n parser.add_argument(\"outfile\", type=argparse.FileType(\"wb\"),\n help=\"output file (to be overwritten). - for stdout\")\n parser.add_argument(\"--prog\", \"-p\", type=str, default=\"neato\", choices=[\n 'neato', 'dot', 'twopi', 'circo', 'fdp', 'sfdp'],\n help=\"A GraphViz graph-drawing algorithm to use\")\n parser.add_argument(\"--uid\", \"-u\", type=str, default=None,\n help=(\"uid of graph to be drawn, if there is more than\"\n \" one graph in the snapshot.\"))\n parser.add_argument(\"--var\", \"-r\", default=None, type=str,\n help=\"var name of graph. Takes precedence over UID.\")\n args = parser.parse_args()\n\n # Even though we asked for args.infile to be opened in binary mode, stdin\n # will be opened in text mode...\n if 'b' in args.outfile.mode:\n outfile = args.outfile\n else:\n # ... 
So we use the use the underlying buffer to write binary data to stdout\n outfile = args.outfile.buffer\n # Now we can do the actual decoding and drawing\n snapshot = json_objects.decode_snapshot_text(args.infile.read())\n if args.var:\n graph = snapshot.names[args.var]\n elif args.uid:\n graph = snapshot.obj_table.getuid(args.uid)\n else:\n # Just search for the first graph we find in the snapshot\n graph = None\n for obj in snapshot.obj_table.values():\n if isinstance(obj, structures.Graph):\n graph = obj\n break\n if graph is None:\n raise Exception(\"No graph found in JSON input\")\n\n gv_graph = graph_to_pgv(graph)\n gv_graph.layout(prog=args.prog)\n gv_graph.draw(path=outfile, format=\"svg\")", "def _repr_svg_(self):\n try:\n return self.mol._repr_svg_()\n except AttributeError:\n return None", "def write_header(out):\n\n out.write(\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\" [\n<!ENTITY ns_svg \"http://www.w3.org/2000/svg\">\n]>\n<svg xmlns=\"&ns_svg;\" width=\"%d\" height=\"%d\" overflow=\"visible\">\n <g id=\"Layer_1\">\n\"\"\" % (WIDTH, HEIGHT))", "def get_xml(self):\n profile = self.profile\n version = self.version\n #self.attribs['xmlns'] = \"http://www.w3.org/2000/svg\"\n self.attribs['xmlns:xlink'] = \"http://www.w3.org/1999/xlink\"\n self.attribs['xmlns:ev'] = \"http://www.w3.org/2001/xml-events\"\n\n self.attribs['baseProfile'] = profile\n self.attribs['version'] = version\n return super(Drawing, self).get_xml()", "def post_drawing_svg_async(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request_async(HttpRequest, 'POST', 'file')", "def _calc_hash(self) -> None:\n self.image = Image.open(self.path)\n self.image = self.image.convert(\"L\")\n self.image = self.image.resize((self.width, self.height), Image.ANTIALIAS)\n lpixels = list(self.image.getdata())\n self.hash = \"0b\"\n for i, pixel in enumerate(lpixels):\n if (i + 1) % self.width == 0 and i != 0:\n continue\n if pixel < lpixels[i + 1]:\n self.hash += \"1\"\n continue\n self.hash += \"0\"\n self.hash_hex = DHash.bin2hex(self.hash)", "def _append_png(self, png, before_prompt=False, metadata=None):\n self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)", "def write_svg(self, filename):\n\n aspect_ratio = self.nx / self.ny\n # Pad the maze all around by this amount.\n padding = 10\n # Height and width of the maze image (excluding padding), in pixels\n height = 500\n width = int(height * aspect_ratio)\n # Scaling factors mapping maze coordinates to image coordinates\n scy, scx = height / self.ny, width / self.nx\n\n def write_wall(f, x1, y1, x2, y2):\n \"\"\"Write a single wall to the SVG image file handle f.\"\"\"\n f.write('\\t<line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\"/>\\n'\n .format(x1, y1, x2, y2),)\n\n def write_circle(file, x_coordinate, y_coordinate, radius, color):\n \"\"\"Write an image to the SVG\"\"\"\n file.write('\\t<circle cx=\"{}\" cy=\"{}\" r=\"{}\" fill=\"{}\"/>\\n'\n .format(x_coordinate, y_coordinate, radius, color))\n\n # Write the SVG image file for maze\n with open(filename, 'w') as f:\n # SVG preamble and styles.\n f.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n f.write('<svg\\n\\txmlns=\"http://www.w3.org/2000/svg\"\\n'\n '\\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\\n')\n f.write('\\twidth=\"{:d}\" height=\"{:d}\" viewBox=\"{} {} {} {}\">'\n .format(width+2*padding, height+2*padding,\n 
-padding, -padding, width+2*padding, height+2*padding))\n f.write('<defs>\\n<style type=\"text/css\"><![CDATA[line {\\n')\n f.write('\\tstroke: #000000;\\n\\tstroke-linecap: square;\\n\\tstroke-width: 5;\\n}')\n f.write(']]></style>\\n</defs>\\n')\n # Draw the \"South\" and \"East\" walls of each cell, if present (these\n # are the \"North\" and \"West\" walls of a neighbouring cell in\n # general, of course).\n for x in range(self.nx):\n for y in range(self.ny):\n # print(str(x) + \" \" + str(y))\n if self.cell_at(x, y).walls['S']:\n x1, y1, x2, y2 = x*scx, (y+1)*scy, (x+1)*scx, (y+1)*scy\n write_wall(f, x1, y1, x2, y2)\n if self.cell_at(x, y).walls['E']:\n x1, y1, x2, y2 = (x+1)*scx, y*scy, (x+1)*scx, (y+1)*scy\n write_wall(f, x1, y1, x2, y2)\n\n # Draw any circle in the maze\n if self.cell_at(x, y).occupied:\n adjustment = (3*padding) / 2\n _x = x*scx + adjustment\n _y = y * scy + adjustment\n\n if self.cell_at(x, y).is_current_position:\n write_circle(file=f,\n x_coordinate=_x,\n y_coordinate=_y,\n radius=padding,\n color=\"blue\")\n elif self.cell_at(x, y).is_objective:\n write_circle(file=f,\n x_coordinate=_x,\n y_coordinate=_y,\n radius=padding,\n color=\"green\")\n else:\n write_circle(file=f,\n x_coordinate=_x,\n y_coordinate=_y,\n radius=padding,\n color=\"red\")\n\n # Draw the North and West maze border, which won't have been drawn\n # by the procedure above.\n f.write('\\t<line x1=\"0\" y1=\"0\" x2=\"{}\" y2=\"0\"/>\\n'.format(width))\n f.write('\\t<line x1=\"0\" y1=\"0\" x2=\"0\" y2=\"{}\"/>\\n'.format(height))\n f.write('</svg>')", "def writeChecksum(self):\n return self._writeMessage(1, [self.checksum], 'writeChecksum')", "def write_image(path, tokens, weights):\n\n f = render_attn_inner(tokens, weights)\n f.savefig(path, bbox_inches=\"tight\", frameon=False)\n plt.close(f)", "def draw(self, stats=[]):\n clear_output(wait=True)\n svg_html = self.to_html(stats)\n display(svg_html)", "def makePNG(self,outDir=os.getcwd(),tmpFname='temp.R'):\n rscript = \"\"\"\nname<-'%s'\ncontig<-'%s'\nstart<-%d\nend<-%d\nstrand<-'%s'\nexonLengths<-c(%s)\nexonOffsets<-c(%s)\nmyLen<-end-start+1\n\npng(filename=paste('%s/',name,'.png',sep=''),width=900,height=300)\nplot.new()\nplot.window(xlim=c(start,end),ylim=c(0,3))\naxis(1)\ntitle(xlab=contig)\ntitle(main=name)\nlines(seq(start,end+1),rep(1,myLen+1),col='blue',lwd=2,lend='butt')\n\nsegments(start+exonOffsets,rep(1,length(exonOffsets)),start+exonOffsets+exonLengths,rep(1,length(exonOffsets)),col='blue',lwd=20,lend='butt')\nif (strand=='+'){\n arrows(start,1.5,(start+(myLen*0.05)),1.5,length=0.125,lwd=1.5,angle=30,col='black')\n} else if (strand=='-') {\n arrows(end,0.5,(end-(myLen*0.05)),0.5,length=0.125,lwd=1.5,angle=30,col='black')\n}\n\n\ndev.off()\"\"\" % (self.name,self.chr,self.start,self.end,self.strand,\",\".join([str(x) for x in self.exonLengths]),\",\".join([str(x) for x in self.exonOffsets]),outDir)\n tmpHandle = open(tmpFname,'w')\n print >>tmpHandle, rscript\n tmpHandle.close()\n commands.getoutput('R CMD BATCH --vanilla %s' % tmpFname)\n os.remove(tmpFname)\n return", "def upload_icon(self, path):\n with open(path, 'rb') as f:\n return self.change_icon(f.read())" ]
[ "0.6322185", "0.6220869", "0.6096771", "0.6001254", "0.58138853", "0.57505566", "0.5744236", "0.57401705", "0.56943905", "0.5545867", "0.5537553", "0.5533253", "0.5466105", "0.5450053", "0.5445687", "0.54211104", "0.5349227", "0.53329223", "0.532195", "0.5315566", "0.5292804", "0.524606", "0.5226282", "0.5220245", "0.5200455", "0.5175482", "0.51639396", "0.5158339", "0.5121", "0.5101955", "0.50866354", "0.5058399", "0.50229514", "0.50107616", "0.50096357", "0.50037664", "0.49703148", "0.49573117", "0.49560007", "0.4941185", "0.49249452", "0.4924794", "0.49191922", "0.49160647", "0.4894512", "0.48872674", "0.48850003", "0.48810402", "0.48683175", "0.48683175", "0.48530442", "0.4841676", "0.48247677", "0.48232755", "0.48118728", "0.4810999", "0.48039153", "0.4781951", "0.47660443", "0.47518677", "0.47412515", "0.4732062", "0.47297916", "0.4728463", "0.47281715", "0.4725265", "0.47213227", "0.47183326", "0.4717553", "0.47167087", "0.4705458", "0.46826383", "0.46697655", "0.4666845", "0.4666485", "0.46642768", "0.46597058", "0.46573943", "0.4654587", "0.46523497", "0.46510124", "0.4649515", "0.4646786", "0.46377295", "0.46338943", "0.46310893", "0.46198744", "0.4611828", "0.46084902", "0.4596902", "0.45856702", "0.45826778", "0.45714372", "0.45504934", "0.4545243", "0.45431313", "0.45426357", "0.45424864", "0.45345712", "0.45310915" ]
0.6853433
0
Check if this file has been created before; if so, just return the S3 URL. Returns None otherwise
def is_duplicate_checksum(checksum): s3 = boto3.client('s3') response = s3.list_objects_v2( Bucket=BUCKET, EncodingType='url', Prefix=checksum ) if response['KeyCount'] > 0 and len(response['Contents']) > 0: return 'https://s3.amazonaws.com/%s/%s' % (BUCKET, response['Contents'][0]['Key']) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_datafile_url(self):\n try:\n return self.datafile.url\n except ValueError:\n if core.utils.is_absolute_url(self.source):\n if self.source.startswith('s3://'):\n return None # file is in the UPLOAD_BUCKET\n return self.source\n logger.error(\"File not found at '%s'\", self.datafile.name)\n return None", "def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise", "def get_s3_file(self, no_copy=False):\n return self.get_file(uri_type=URI_S3, no_copy=no_copy)", "def _copy_s3_to_local(src_bucket: str, src_key: str, dest: str)->bool:\n s3_resource = boto3.resource('s3')\n try:\n s3_resource.Bucket(src_bucket).download_file(src_key, dest)\n except Exception as exc:\n raise Error(\"Error {} occurred while working on s3 object to local.\".format(exc))\n \n return True", "def get_file_path(self, name):\n if self.folder.type != \"s3\":\n return super(NereidStaticFile, self).get_file_path(name)\n\n cloudfront = config.get('nereid_s3', 'cloudfront')\n if cloudfront:\n return '/'.join([cloudfront, self.s3_key])\n\n return \"https://s3.amazonaws.com/%s/%s\" % (\n config.get('nereid_s3', 'bucket'), self.s3_key\n )", "def get_url(self, name):\n if self.folder.type != 's3':\n return super(NereidStaticFile, self).get_url(name)\n\n cloudfront = config.get('nereid_s3', 'cloudfront')\n if cloudfront:\n return '/'.join([cloudfront, self.s3_key])\n\n return \"https://s3.amazonaws.com/%s/%s\" % (\n config.get('nereid_s3', 'bucket'), self.s3_key\n )", "def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)", "def s3resource(self):\n return self._s3resource", "def url(self):\n if not self.fid:\n raise exceptions.NotCreatedError(object=self)\n\n return self._file_url(self.fid)", "def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)", "def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return", "def 
url_for(filename):\n return \"{}{}\".format(S3_LOCATION, filename)", "def overwrite_url(self):\n if self.has_url_overwrite:\n return self.path\n return None", "def check_reference_open(refpath):\n if refpath != \"N/A\" and refpath.strip() != \"\":\n if s3_utils.is_s3_uri(refpath):\n if not s3_utils.object_exists(refpath):\n raise RuntimeError(\"S3 object does not exist: \" + refpath)\n else:\n with open(refpath, \"rb\"):\n pass\n return refpath", "def _create_s3_bucket_if_not_exist(self, prefix):\n account = self.boto_session.client(\"sts\").get_caller_identity()[\"Account\"]\n region = self.boto_session.region_name\n s3_bucket_name = \"{}-{}-{}\".format(prefix, region, account)\n\n s3 = self.boto_session.resource(\"s3\")\n s3_client = self.boto_session.client(\"s3\")\n try:\n # 'us-east-1' cannot be specified because it is the default region:\n # https://github.com/boto/boto3/issues/125\n if region == \"us-east-1\":\n s3.create_bucket(Bucket=s3_bucket_name)\n else:\n s3.create_bucket(\n Bucket=s3_bucket_name, CreateBucketConfiguration={\"LocationConstraint\": region}\n )\n logger.info(\n \"Successfully create S3 bucket '{}' for storing {} data\".format(\n s3_bucket_name, prefix\n )\n )\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n message = e.response[\"Error\"][\"Message\"]\n\n if error_code == \"BucketAlreadyOwnedByYou\":\n pass\n elif (\n error_code == \"OperationAborted\" and \"conflicting conditional operation\" in message\n ):\n # If this bucket is already being concurrently created, we don't need to create it again.\n pass\n elif error_code == \"TooManyBuckets\":\n # Succeed if the default bucket exists\n s3.meta.client.head_bucket(Bucket=s3_bucket_name)\n else:\n raise\n\n s3_waiter = s3_client.get_waiter(\"bucket_exists\")\n s3_waiter.wait(Bucket=s3_bucket_name)\n return s3_bucket_name", "def _s3_origin(self):\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n s3_origin = pipeline_builder.add_stage('Amazon S3', type='origin')\n s3_origin.set_attributes(bucket=self.environments['aws'].s3_bucket_name,\n common_prefix='origin_data',\n prefix_pattern=f\"{DATASETS[self.dataset]['file_pattern']}\",\n data_format='DELIMITED',\n header_line='WITH_HEADER',\n delimiter_format_type='CUSTOM',\n delimiter_character=DATASETS[self.dataset]['delimiter'],\n number_of_threads=self.number_of_threads,\n max_batch_size_in_records=self.batch_size)\n return s3_origin, pipeline_builder", "def _file_exists_in_s3(client, key):\n try:\n obj = client.head_object(Bucket=bucket, Key=key)\n return obj[\"ContentLength\"]\n except ClientError as exc:\n if exc.response[\"Error\"][\"Code\"] != \"404\":\n raise", "def s3(self) -> Optional[pulumi.Input['FlowSourceFlowConfigSourceConnectorPropertiesS3Args']]:\n return pulumi.get(self, \"s3\")", "def s3(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"s3\")", "def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):\n (\n key,\n keyid,\n service_url,\n verify_ssl,\n kms_keyid,\n location,\n path_style,\n https_enable,\n ) = _get_s3_key()\n\n # check the local cache...\n if os.path.isfile(cached_file_path):\n file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)\n if file_meta:\n file_etag = file_meta[\"ETag\"]\n\n if file_etag.find(\"-\") == -1:\n file_md5 = file_etag\n cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, \"md5\")\n\n # hashes match we have a cache hit\n if cached_md5 == file_md5:\n return\n else:\n cached_file_stat = os.stat(cached_file_path)\n 
cached_file_size = cached_file_stat.st_size\n cached_file_mtime = datetime.datetime.fromtimestamp(\n cached_file_stat.st_mtime\n )\n\n cached_file_lastmod = datetime.datetime.strptime(\n file_meta[\"LastModified\"], \"%Y-%m-%dT%H:%M:%S.%fZ\"\n )\n if (\n cached_file_size == int(file_meta[\"Size\"])\n and cached_file_mtime > cached_file_lastmod\n ):\n log.debug(\n \"cached file size equal to metadata size and \"\n \"cached file mtime later than metadata last \"\n \"modification time.\"\n )\n ret = __utils__[\"s3.query\"](\n key=key,\n keyid=keyid,\n kms_keyid=keyid,\n method=\"HEAD\",\n bucket=bucket_name,\n service_url=service_url,\n verify_ssl=verify_ssl,\n location=location,\n path=urllib.parse.quote(path),\n local_file=cached_file_path,\n full_headers=True,\n path_style=path_style,\n https_enable=https_enable,\n )\n if ret is not None:\n for header_name, header_value in ret[\"headers\"].items():\n name = header_name.strip()\n value = header_value.strip()\n if str(name).lower() == \"last-modified\":\n s3_file_mtime = datetime.datetime.strptime(\n value, \"%a, %d %b %Y %H:%M:%S %Z\"\n )\n elif str(name).lower() == \"content-length\":\n s3_file_size = int(value)\n if (\n cached_file_size == s3_file_size\n and cached_file_mtime > s3_file_mtime\n ):\n log.info(\n \"%s - %s : %s skipped download since cached file size \"\n \"equal to and mtime after s3 values\",\n bucket_name,\n saltenv,\n path,\n )\n return\n\n # ... or get the file from S3\n __utils__[\"s3.query\"](\n key=key,\n keyid=keyid,\n kms_keyid=keyid,\n bucket=bucket_name,\n service_url=service_url,\n verify_ssl=verify_ssl,\n location=location,\n path=urllib.parse.quote(path),\n local_file=cached_file_path,\n path_style=path_style,\n https_enable=https_enable,\n )", "def retrieve_s3_contents ( s3_conn, bucket_name, key_name, stored_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if key.exists( ) :\n if stored_filename :\n key.get_contents_to_filename( stored_filename )\n return stored_filename\n\n return key.get_contents_as_string( )\n\n return None", "def s3(self) -> Optional['outputs.DataRepositoryAssociationS3']:\n return pulumi.get(self, \"s3\")", "def get_bucket_file_url(bucket, key):\n\t#https://s3.amazonaws.com/link-checker/2018-05-27-235740.txt\n\tfile_url = \"https://s3.amazonaws.com/\" + bucket + \"/\" + key\n\treturn file_url", "def s3(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesS3Args']]:\n return pulumi.get(self, \"s3\")", "def get_previously_valid_urls():\n\n previously_valid_urls = get_json(BUCKET, PREVIOUSLY_VALID_URLS_S3_KEY)\n\n # If previously_valid_urls does not currently exist, create a new one and save to S3\n if not previously_valid_urls:\n previously_valid_urls = []\n save_previously_valid_urls(previously_valid_urls)\n\n return set(previously_valid_urls)", "def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url", "def check_for_url(self, s3url):\n bucket, key = S3Hook.parse_s3_url(s3url)\n s3hook = S3Hook(aws_conn_id=self.aws_conn_id)\n if not s3hook.check_for_bucket(bucket_name=bucket):\n raise AirflowException(\n \"The input S3 Bucket {} does not exist \".format(bucket))\n if key and not s3hook.check_for_key(key=key, bucket_name=bucket)\\\n and not s3hook.check_for_prefix(\n prefix=key, bucket_name=bucket, delimiter='/'):\n # check if s3 key exists in the case user provides a single file\n # or if s3 prefix exists in the case user provides a prefix 
for files\n raise AirflowException(\"The input S3 Key \"\n \"or Prefix {} does not exist in the Bucket {}\"\n .format(s3url, bucket))\n return True", "def s3_url(row):\n return f's3://{row[\"Bucket\"]}/{row[\"Key\"]}'", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def s3_etag(url, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_object = s3_resource.Object (bucket_name, s3_path)\n\treturn s3_object.e_tag", "def test_get_url(self):\n package = make_package()\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + self.storage.get_path(package))\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['storage.access_key'])", "def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file", "def save_previously_valid_urls(previously_valid_urls):\n\n LOGGER.info(f\"Writing previously_valid_urls to bucket {BUCKET} with key {PREVIOUSLY_VALID_URLS_S3_KEY}.\") \n save_json(BUCKET, PREVIOUSLY_VALID_URLS_S3_KEY, previously_valid_urls)", "def s3_resource(self):\n return boto3.resource('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def s3_request(func):\n\n\t@wraps (func)\n\tdef wrapper(url, *args, **kwargs):\n\t\ttry:\n\t\t\treturn func (url, *args, **kwargs)\n\t\texcept ClientError as exc:\n\t\t\tif int (exc.response[\"Error\"][\"Code\"]) == 404:\n\t\t\t\traise EnvironmentError (\"file {} not found\".format (url))\n\t\t\telse:\n\t\t\t\traise\n\n\treturn wrapper", "def 
test_bucket_availability(self):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(app.config['S3_PHOTO_BUCKET'])\n exists = True\n try:\n s3.meta.client.head_bucket(Bucket=app.config['S3_PHOTO_BUCKET'])\n self.assertEqual(exists, True)\n except botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = e.response['Error']['Code']\n if error_code == '404':\n exists = False\n self.assertEqual(exists, True, msg='Bucket is not exist!')", "def create_presigned_url(bucket_name, bucket_key, expiration=3600, signature_version=s3_signature['v4']):\n s3_client = boto3.client('s3',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n config=Config(signature_version=signature_version),\n region_name=AWS_DEFAULT_REGION\n )\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': bucket_key},\n ExpiresIn=expiration)\n print(s3_client.list_buckets()['Owner'])\n for key in s3_client.list_objects(Bucket=bucket_name, Prefix=bucket_key)['Contents']:\n print(key['Key'])\n except ClientError as e:\n logging.error(e)\n return None\n # The response contains the presigned URL\n return response", "def get_blob_url(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n location = self.s3.generate_presigned_url(\n ClientMethod='get_object',\n ExpiresIn=36*60*60,\n Params={'Bucket': bucket_name, 'Key': key})\n return location", "def get_available_name(self, name):\n if self.file_overwrite:\n name = self._clean_name(name)\n return name\n\n return super(S3, self).get_available_name(name)", "def mock_state(self):\n if self.name in self.connection.mock_s3_fs:\n return self.connection.mock_s3_fs[self.name]['keys']\n else:\n raise boto.exception.S3ResponseError(404, 'Not Found')", "def upload_file_to_s3(self, file_data):\r\n\r\n file_key = file_data.name + datetime.now(UTC).strftime(\r\n xqueue_interface.dateformat\r\n )\r\n\r\n file_data.seek(0)\r\n s3_public_url = upload_to_s3(\r\n file_data, file_key, self.s3_interface\r\n )\r\n\r\n return s3_public_url", "def s3_data_conn ( self ) :\n if not self.s3_data :\n self.s3_data = boto.s3.connection.S3Connection( self.access_key, self.access_key_secret )\n return self.s3_data", "def image_url(self, name):\r\n s3_key = self._generate_s3_key(name)\r\n return s3_key.generate_url(self.IMAGE_LINK_DURATION)", "def test_get_s3_path(self):\n truth_path = 'MCD43A4.006/12/07/2015266'\n path = modis.get_s3_path(self.fname)\n self.assertEqual(path, truth_path)\n prefix = 'test'\n path = modis.get_s3_path(self.fname, prefix=prefix)\n self.assertEqual(path, os.path.join(prefix, truth_path))", "def generate_download_url(self):\n access_key = getattr(settings, 'AWS_ACCESS_KEY_ID')\n secret_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY')\n bucket = getattr(settings, 'AWS_STORAGE_BUCKET_NAME')\n region = getattr(settings, 'S3DIRECT_REGION')\n if not bucket or not region or not access_key or not secret_key:\n return '/product-not-found/' # TODO: raise custom 404 error\n\n PROTECTED_DIR_NAME = getattr(settings, 'PROTECTED_DIR_NAME', 'protected')\n # for aws, self.file is equivalent to self.file.path in local\n path = '{base}/{file_path}'.format(base=PROTECTED_DIR_NAME, file_path=str(self.file))\n\n aws_dl_object = AWSDownload(access_key, secret_key, bucket, region)\n file_url = aws_dl_object.generate_url(path, new_filename=self.display_name)\n return file_url", "def 
test_amazon_s3_store_filename(self):\n config = Config()\n metadata_bucket = config.config.get(\"metadata\", \"bucket\")\n data_bucket = config.config.get(\"data\", \"bucket\")\n metadata_provider = amazon.S3(config, metadata_bucket).connect()\n provider = amazon.S3(config, data_bucket).connect()\n key = checksum_file(\"LICENSE\")\n metadata_provider.store(key, \"LICENSE METADATA\")\n provider.store_from_filename(key, \"LICENSE\")\n t = tempfile.NamedTemporaryFile()\n metadata = metadata_provider.retrieve(key)\n provider.retrieve_to_filename(key, t.name)\n self.assertEqual(file(\"LICENSE\").read(), file(t.name).read())\n self.assertEqual(\"LICENSE METADATA\", metadata)\n metadata_provider.delete(key)\n provider.delete(key)\n metadata_provider.disconnect()\n provider.disconnect()", "def type(self):\n return 's3_file'", "def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)", "def do_s3_static_url(parser, token):\n return do_s3_media_url(parser, token, static=True)", "def s3_request(func):\n\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise EnvironmentError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper", "def s3_request(func):\n\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise EnvironmentError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper", "def get_file(self, key, local_file):\n\t\t\n\t\ttry:\n\t\t\tfh = open(local_file, 'wb')\n\t\t\tfh.write(self.s3.get(self.bucket, key).object.data)\n\t\t\tfh.close()\n\t\texcept:\n\t\t\treturn False", "def url(cls, bucket, path):\n if path.startswith('/'):\n path = path[1:]\n if bucket.startswith('http://') or bucket.startswith('https://'):\n url = bucket\n else:\n url = cls.S3_BASE + bucket\n if not url.endswith('/'):\n url += '/'\n return url + path", "def __check_metadata(s3client, key, bucket_name):\n response = s3client.head_object(Bucket=bucket_name, Key=key)\n if 'status' in response['Metadata']:\n return response['Metadata']['status'] == 'uploaded'\n return False", "def get_s3_object(self, key):\n try:\n bucket_name = app.config['S3_BUCKET_NAME']\n s3_client = app.config['S3']\n response = s3_client.get_object(Bucket=bucket_name, Key=key)\n return response['Body'].read()\n except Exception:\n return None", "def get_s3_url(self, bucket=None, region=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n if region is None:\n region = self.AWS_S3_REGION\n \n return \"https://{}.s3.{}.amazonaws.com/\".format(bucket, region)", "def _copy_local_to_s3(src: str, dest_bucket: str, dest_key: str)->bool:\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(src, dest_bucket, dest_key)\n except Exception as exc:\n raise Error(\"Error {} occurred while working on local object to s3.\".format(exc))\n \n return True", "def CreateS3Bucket(self):\n bucketFound = False\n region = \"eu-west-1\"\n try: # Check if bucket exists\n client.head_bucket(Bucket=self.bucketName)\n bucketFound = True\n s3Log.info (\"Bucket \\'{}\\' Exists! 
\".format(self.bucketName))\n except ClientError as e: # Bucket Does not exist\n if e.response[\"Error\"][\"Message\"] == \"Not Found\":\n s3Log.info(\"Bucket \\'{}\\' does not exist!\".format(self.bucketName))\n\n if bucketFound == 0: #since bucket does not exist, we ought to create it\n s3Log.info(\"Creating Bucket \\'{}\\' in region={}\".format(self.bucketName, region))\n try:\n bucket_response = client.create_bucket(Bucket=self.bucketName,\n CreateBucketConfiguration={\n 'LocationConstraint': region})\n bucketFound = True\n except ClientError as e:\n s3Log.error(\"FATAL ERROR: Unable to create bucket \\'{}\\' {}\".format(self.bucketName, e))\n sys.exit(1)\n\n\n return bucketFound", "async def get_or_create_temporary_s3_access(user_id: UserID):", "def test_exists_cache() -> None:\n s3_client = boto3.client(\"s3\", region_name=\"us-east-1\")\n s3_client.create_bucket(Bucket=\"example-bucket\")\n\n # Object should not exist.\n assert not File(\"s3://example-bucket/a\").exists()\n assert File(\"s3://example-bucket/a\").get_hash() == \"cb7880ecc11723b8b8cad37f6b5160251d7a765e\"\n\n # Update object outside of s3fs.\n s3_client.put_object(Body=b\"hello\", Bucket=\"example-bucket\", Key=\"a\")\n\n # Using the normal s3fs exists(), the existance check would be cached and\n # would now return an incorrect result.\n\n # However, File.exists() avoids using the s3fs cache and gives the correct result.\n # The hash should update as well.\n assert File(\"s3://example-bucket/a\").exists()\n assert File(\"s3://example-bucket/a\").get_hash() == \"ea438dc20234f0226736d407d7caba13f7e3c49e\"\n\n # Directory should not exist.\n assert not Dir(\"s3://example-bucket/dir/\").exists()\n\n # Update object outside of s3fs.\n s3_client.put_object(Body=b\"hello\", Bucket=\"example-bucket\", Key=\"dir/a\")\n\n # Directory should now exist.\n assert Dir(\"s3://example-bucket/dir/\").exists()", "def delete_source_from_s3(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_source_from_s3\")", "def delete_source_from_s3(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_source_from_s3\")", "def get_storage_location(self):\n return self.s3_bucket", "def get_s3_url(iid):\n return \"http://%s.s3-website.%s.amazonaws.com/%s\" % (\n BUCKET_NAME,\n AWS_CLIENT_CONFIG['region_name'],\n iid\n )", "def fetch(iid):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n obj = s3.Bucket(BUCKET_NAME).Object(iid).get()\n if obj:\n return obj.get('Body')\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # get locally from temp dir (tests, local development)\n return get_temp_file(iid)\n return None", "def default_s3_location(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_s3_location\")", "def s3_location(self, value):\n info = urlparse(value)\n if info.scheme != \"s3\":\n raise ValueError(\"S3 location must be a valid s3 url\\tgot={0}\".format(value))\n\n bucket = info.netloc\n if not bucket:\n raise ValueError(\"S3 location must be a valid s3 url\\tgot={0}\".format(value))\n\n key = info.path\n return S3Location(bucket, key, value)", "def _generate_s3_url(self, bucket, path):\n key = path\n\n # NOTE: path can be an empty string meaning that\n # we need to generate a URL pointing at the root directory of the bucket.\n # However, boto3 doesn't allow us to pass the key as an empty string.\n # As a workaround we set it to a dummy string and later remove it from the generated URL\n if not path:\n key = 
'dummy'\n\n url = self._s3_link_client.generate_presigned_url(\n 'get_object',\n ExpiresIn=0,\n Params={\n 'Bucket': bucket,\n 'Key': key\n }\n )\n\n # If the path was an empty string we need to strip out trailing dummy string ending up with a URL\n # pointing at the root directory of the bucket\n if not path:\n url = url.replace('/' + key, '/')\n\n return url", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "def default_s3_location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_s3_location\")", "def s3_etag(url):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag", "def s3_etag(url):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag", "def access_s3():\n try:\n s3helper = S3Helper()\n bucket = s3helper.get_bucket(get_archive_bucket())\n LOG.info('Access S3 bucket name: {0}'.format(bucket.name))\n except Exception:\n LOG.exception('check_database_connection')\n return False\n\n return True", "def _head_object(\n s3_conn: S3Client, bucket: str, key: str\n) -> Optional[HeadObjectOutputTypeDef]:\n try:\n return s3_conn.head_object(Bucket=bucket, Key=key)\n except botocore.exceptions.ClientError as err:\n if err.response[\"Error\"][\"Code\"] == \"404\":\n return None\n raise", "def is_valid_bucket(bucket_name: str):\n\n s3 = boto3.resource('s3')\n\n try:\n s3.meta.client.head_bucket(Bucket=bucket_name)\n syslog.syslog(syslog.LOG_INFO,\n f'Found valid S3 Bucket - {bucket_name}')\n return s3.Bucket(bucket_name)\n except ClientError as e:\n syslog.syslog(syslog.LOG_ERR,\n f'Invalid S3 Bucket - {bucket_name} - {e}')\n return None", "def initialize_file(region, bucket, filename):\n s3connection = boto.s3.connect_to_region(region)\n s3_bucket = s3connection.lookup(bucket)\n if not s3_bucket:\n raise ValueError(\"Bucket {} not found in region {}\".format(bucket, region))\n s3_file = s3_bucket.get_key(filename)\n if not s3_file:\n raise ValueError(\"Filename {} not found in bucket {}\".format(filename, bucket))\n return s3_file", "def _is_s3(path:str)->bool:\n return path.startswith(\"s3://\")", "def test_force_put_to_overwrite_existing(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n filename = 'demo-test.tar.gz'\n src1 = os.path.join(uploads, filename)\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id(filename)\n backend.put_variant(src1, id, filename)\n backend.put_variant(src2, id, filename, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/' + filename\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(Bucket=backend.bucket_name, Key=path)\n self.assertEquals(\n str(os.path.getsize(src2)),\n str(res['ResponseMetadata']['HTTPHeaders']['content-length'])\n )", "def get_s3_filepath(image_id, prefix=\"\", filetype=\"png\"):\n bucket_name = os.environ[\"LABELED_BUCKET_NAME\"]\n key_name = \"{prefix}/{id}.{suffix}\".format(\n prefix=prefix, id=image_id, suffix=filetype)\n url = \"https://s3.amazonaws.com/{bucket}/{key}\".format(\n 
bucket=bucket_name, key=key_name)\n return url", "def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called", "def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return self.s3_resource.Object(bucket_name, key)", "def get_url(self):\n try:\n return self._file.url\n except AttributeError:\n raise NotImplementedError(\"Underlying file does not have a URL.\")", "def test_retrieve_original_to_temp(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n src = os.path.join(self.upload_path, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n\n # retrieve file\n result = backend.retrieve_original(id, self.tmp_path)\n expected_dst = os.path.join(self.tmp_path, id, 'demo-test.tar.gz')\n self.assertEquals(expected_dst, result)\n self.assertTrue(os.path.exists(expected_dst))", "def __check(s3client, key, bucket_name):\n try:\n s3client.head_object(Bucket=bucket_name, Key=key)\n except ClientError as e:\n return int(e.response['Error']['Code']) != 404\n return True", "def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)", "def test_s3_bucket_exists(self) -> None:\n if self.prod_env:\n bucket_name = 'saints-xctf-credentials-prod'\n else:\n bucket_name = 'saints-xctf-credentials-dev'\n\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n return s3_bucket.get('Name') == bucket_name", "def does_bucket_exists( bucket_name ):\n bucket_exists_status = { 'status':False, 'error_message':'' }\n try:\n s3 = boto3.resource('s3')\n s3.meta.client.head_bucket( Bucket = bucket_name )\n bucket_exists_status['status'] = True\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n bucket_exists_status['status'] = False\n bucket_exists_status['error_message'] = str(e)\n else:\n # logger.error('ERROR: {0}'.format( str(e) ) )\n bucket_exists_status['status'] = False\n bucket_exists_status['error_message'] = str(e)\n return bucket_exists_status", "def _get_aws_s3_connection(cls, access_key, secret_access_key):\n return boto.connect_s3(access_key, secret_access_key)", "def get_url(url: str) -> Optional[str]:\n try:\n parsed = urlparse(url)\n except ValueError:\n return None\n\n if parsed.scheme in (\"file\", \"\"):\n return unquote(parsed.path)\n elif parsed.scheme in (\"http\", \"https\"):\n if url.startswith(\"https://open.spotify.com/image/\"):\n url = \"https://i.scdn.co/image/\" + url[len(\"https://open.spotify.com/image/\") :]\n\n name = hashlib.sha1(url.encode(\"utf-8\")).hexdigest()\n path = os.path.join(CACHE_PATH, name) + Path(parsed.path).suffix\n\n if os.path.isfile(path):\n info(f\"Already downloaded at {path}\")\n return path\n\n # Download the file to our cache. 
We should probably do this asynchronously,\n # but rely on the fact that the remote server is _probably_ fast enough.\n warning(f\"Downloading {url} -> {path}\")\n try:\n os.makedirs(CACHE_PATH, exist_ok=True)\n with urlopen(url) as read:\n with open(path, \"wb\") as write:\n while chunk := read.read(2048):\n write.write(chunk)\n\n return path\n except Exception as e:\n critical(\"Error getting image \" + str(e))\n\n try:\n os.remove(path)\n except:\n pass\n\n return None\n else:\n return None", "def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))", "def get_s3_client():\n return boto3.resource('s3')", "def sync_to_bucket(s3_url,\n region='eu-west-1',\n profile_name=None):\n\n parsed_s3_url = urlparse.urlparse(s3_url);\n\n bucket_name = parsed_s3_url.hostname;\n key_prefix = parsed_s3_url.path;\n if key_prefix[0] == '/':\n key_prefix = key_prefix[1:]\n if key_prefix[-1] != '/':\n key_prefix = key_prefix + '/'\n\n def inner(fn_inner):\n \"\"\"\n Decorator function function sent in should be having signature\n func(None,None, XmlDoc) and should yield JSON document one for\n each file that should be persisted to S3\n \"\"\"\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler\n\n return inner", "def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n 
pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata", "def test_parse_url(self):\n filename = 'demo-file.tar.gz'\n backend = BackendS3(**self.config)\n pb = PathBuilder('123456')\n base_url = backend.get_url()\n id = utils.generate_id(filename)\n parts = backend.id_to_path(id)\n path = '/'.join(parts)\n object_url = base_url + '/' + path + '/'\n original = object_url + filename\n crop_filename = pb.get_auto_crop_filename(id, '100x100', 'fit', 'jpg')\n resize = object_url + crop_filename\n result1 = backend.parse_url(original)\n result2 = backend.parse_url(resize)\n self.assertEquals(id, result1[0])\n self.assertEquals(filename, result1[1])\n self.assertEquals(id, result2[0])\n self.assertEquals(crop_filename, result2[1])", "def create_presigned_url(bucket_name, object_name, expiration=3600):\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=expiration)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")", "def get_profile_picture_url(cls, filename):\n if filename is None:\n return None\n profile_picture = bucket.blob('images/users/'+filename)\n if profile_picture.exists():\n profile_picture.make_public()\n return profile_picture.public_url\n return None", "def create_presigned_url(bucket_name, object_name):\n\n logger = logging.getLogger(\"SimpleReplayLogger\")\n\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=604800)\n except ClientError as e:\n logger.error(f\"Unable to generate presigned url for object {object_name} in bucket {bucket_name}. {e}\")\n return None\n\n return response", "def s3_get(url, temp_file, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_resource.Bucket (bucket_name).download_fileobj (s3_path, temp_file)" ]
[ "0.6728675", "0.6340867", "0.6091045", "0.6061678", "0.6043561", "0.60391515", "0.6009251", "0.59960645", "0.5973066", "0.5970535", "0.5962591", "0.5951693", "0.59505904", "0.59455204", "0.59345853", "0.5928199", "0.59084433", "0.5899881", "0.5891643", "0.5853361", "0.58446634", "0.58237094", "0.58051544", "0.57983965", "0.5775388", "0.57575995", "0.57468706", "0.56776404", "0.567272", "0.56546897", "0.5645835", "0.56333816", "0.5630126", "0.5629657", "0.5627725", "0.5625706", "0.5625287", "0.5624312", "0.5609043", "0.5603594", "0.5589363", "0.5575538", "0.5574232", "0.55706435", "0.5564152", "0.5559725", "0.554166", "0.5538791", "0.5524316", "0.5521888", "0.5521888", "0.5518277", "0.55179775", "0.55132055", "0.55109006", "0.55107474", "0.5508266", "0.5502859", "0.5496319", "0.54878825", "0.548445", "0.548445", "0.54824847", "0.5480096", "0.5477037", "0.5465628", "0.54653496", "0.5464967", "0.54623294", "0.54623294", "0.5460012", "0.5459326", "0.5459326", "0.5455177", "0.54508734", "0.5443512", "0.5441562", "0.5428074", "0.54228204", "0.54222834", "0.54205716", "0.54086965", "0.5401928", "0.5398775", "0.5395034", "0.53907853", "0.5384629", "0.5374268", "0.5371045", "0.53672284", "0.53657323", "0.5335892", "0.5329857", "0.53273565", "0.5326835", "0.5320473", "0.5312711", "0.5306856", "0.5299142", "0.529803" ]
0.6010111
6
Uploads the SVG file to S3, and returns the URL of the object
def upload_svg(filename, xml_string):
    s3 = boto3.client('s3')
    response = s3.put_object(
        ACL='public-read',
        Body=xml_string,
        Bucket=BUCKET,
        Key=filename,
        StorageClass='REDUCED_REDUNDANCY',
    )
    return 'https://s3.amazonaws.com/%s/%s' % (BUCKET, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload(file_path, aws_path, access_key, secret_key) -> None:\n # bucket = \"dev-com-courtlistener-storage\"\n bucket = \"seals.free.law\"\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n )\n transfer = S3Transfer(client)\n if \".png\" in file_path:\n content_type = \"image/png\"\n else:\n content_type = \"image/svg+xml\"\n transfer.upload_file(\n file_path,\n bucket,\n aws_path,\n extra_args={\"ContentType\": content_type, \"ACL\": \"public-read\"},\n )\n print(f\"http://{bucket}.s3-us-west-2.amazonaws.com/{aws_path}\")", "def url_for(filename):\n return \"{}{}\".format(S3_LOCATION, filename)", "def upload_file_to_s3(self, file_data):\r\n\r\n file_key = file_data.name + datetime.now(UTC).strftime(\r\n xqueue_interface.dateformat\r\n )\r\n\r\n file_data.seek(0)\r\n s3_public_url = upload_to_s3(\r\n file_data, file_key, self.s3_interface\r\n )\r\n\r\n return s3_public_url", "def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)", "def get_s3_url(iid):\n return \"http://%s.s3-website.%s.amazonaws.com/%s\" % (\n BUCKET_NAME,\n AWS_CLIENT_CONFIG['region_name'],\n iid\n )", "def do_s3_static_url(parser, token):\n return do_s3_media_url(parser, token, static=True)", "def url(cls, bucket, path):\n if path.startswith('/'):\n path = path[1:]\n if bucket.startswith('http://') or bucket.startswith('https://'):\n url = bucket\n else:\n url = cls.S3_BASE + bucket\n if not url.endswith('/'):\n url += '/'\n return url + path", "def generate_url(self, path):\n return AWS_S3_BUCKET_URL.format(bucket=self.bucket, path=path)", "def upload_from_path_to_s3(file_path):\n bucket_name = \"alp-reports-lambda\"\n environment = \"dev\" if os.environ.get('LOCAL') else \"prod\"\n object_key = f\"{environment}/835/{file_path.split('/')[-1]}\"\n\n s3 = boto3.resource('s3')\n s3.Object(bucket_name, object_key).upload_file(file_path, ExtraArgs={'ACL': 'public-read'})\n\n return f\"https://s3.amazonaws.com/{bucket_name}/{object_key}\"", "def sign_url(self, url, expiration=None):\n if not expiration:\n expiration = self._s3_presigned_url_expiration\n\n bucket, key = self.split_url(url)\n url = self.client.generate_presigned_url(\n 'get_object',\n ExpiresIn=int(expiration),\n Params={\n 'Bucket': bucket,\n 'Key': key\n }\n )\n\n return url", "def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)", "def sync_to_s3(pathname, bucket):\n BUCKET_MANAGER.sync(pathname, bucket)\n print(BUCKET_MANAGER.get_bucket_url(BUCKET_MANAGER.s3.Bucket(bucket)))", "def upload_file_and_return_url(self, file_name, name_on_storage, **additional_params):\n assets_bucket = self.storage_client.bucket(\"car_assets\")\n blob 
= assets_bucket.blob(name_on_storage)\n blob.upload_from_filename(file_name, **additional_params)\n return blob.public_url", "def save_image_to_s3(filename, file):\n \n s3_path = f\"s3://shopifyimagerepository/{filename}\"\n \n s3_client.put_object(Body=file,\n Bucket=\"shopifyimagerepository\",\n Key=filename,\n ACL=\"public-read\")", "def upload_object(self, file_path, s3_path):\n logging.info(\"Uploading file to \\\"{}\\\" to S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).upload_file(file_path, key)", "def _upload_to_s3(filename):\n if not app.config.get('UPLOAD_SCREENSHOTS_TO_S3', False):\n return\n\n import boto\n from boto.s3.key import Key\n conn = boto.connect_s3()\n b = conn.get_bucket(app.config['S3_BUCKET'])\n k = Key(b)\n k.key = '{}/{}'.format(\n app.config.get('S3_FILES_PREFIX', 'sleepypuppy'),\n filename\n )\n k.set_contents_from_filename(\n \"{}/{}\".format(\n app.config['UPLOAD_FOLDER'],\n filename\n )\n )\n os.remove(\n \"{}/{}\".format(\n app.config['UPLOAD_FOLDER'],\n filename\n )\n )", "def s3(ctx, bucket_name, data_file, region):\n ctx.obj['BUCKET_NAME'] = bucket_name\n ctx.obj['DATA_FILE'] = data_file\n ctx.obj['TYPE'] = 's3'\n ctx.obj['REGION'] = region", "def generate_presigned_url(file_path):\n\n session = boto3.session.Session(\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n region_name='eu-central-1')\n s3Client = session.client('s3', config=Config(signature_version='s3v4'))\n\n # Create a URL valid for 30 seconds.\n return s3Client.generate_presigned_url('get_object',\n Params={\n 'Bucket':\n AWS_STORAGE_BUCKET_NAME,\n 'Key':\n file_path},\n ExpiresIn=30)", "def get_s3_url(self, bucket=None, region=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n if region is None:\n region = self.AWS_S3_REGION\n \n return \"https://{}.s3.{}.amazonaws.com/\".format(bucket, region)", "def image_url(self, name):\r\n s3_key = self._generate_s3_key(name)\r\n return s3_key.generate_url(self.IMAGE_LINK_DURATION)", "def s3_url(row):\n return f's3://{row[\"Bucket\"]}/{row[\"Key\"]}'", "def presign(self, s3uri, **kwargs):\n return self.exec_command('presign %s' % (s3uri), **kwargs)[0].strip()", "def save_file_aws(obj, file_path, aws_credentials):\n bucket_engine = S3Bucket(*aws_credentials)\n data = gzip.compress(json.dumps(obj).encode('utf-8'))\n bucket_engine.write(file_path, data)", "def save_svg(xml_string, checksum=None):\n if checksum is None:\n checksum = get_checksum(xml_string) # Get checksum of this file\n existing_url = is_duplicate_checksum(checksum) # Make sure it's unique\n if existing_url is not None: # We've generated this file before.\n logger.info('Duplicate detected for %s' % checksum)\n return existing_url # If dupe_check has a value, it's a URL to an existing (duplicate) file.\n\n # Usually, we've already checked for a duplicate - the above logic is just for cases\n # where we need to generate the checksum on the backend\n filename = get_filename(checksum)\n url = upload_svg(filename, xml_string)\n return url", "def get_url(self, name):\n if self.folder.type != 's3':\n return super(NereidStaticFile, self).get_url(name)\n\n cloudfront = config.get('nereid_s3', 'cloudfront')\n if cloudfront:\n return '/'.join([cloudfront, self.s3_key])\n\n return \"https://s3.amazonaws.com/%s/%s\" % (\n config.get('nereid_s3', 'bucket'), self.s3_key\n )", "def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return 
self.s3_resource.Object(bucket_name, key)", "def _generate_s3_url(self, bucket, path):\n key = path\n\n # NOTE: path can be an empty string meaning that\n # we need to generate a URL pointing at the root directory of the bucket.\n # However, boto3 doesn't allow us to pass the key as an empty string.\n # As a workaround we set it to a dummy string and later remove it from the generated URL\n if not path:\n key = 'dummy'\n\n url = self._s3_link_client.generate_presigned_url(\n 'get_object',\n ExpiresIn=0,\n Params={\n 'Bucket': bucket,\n 'Key': key\n }\n )\n\n # If the path was an empty string we need to strip out trailing dummy string ending up with a URL\n # pointing at the root directory of the bucket\n if not path:\n url = url.replace('/' + key, '/')\n\n return url", "def s3resource(self):\n return self._s3resource", "def store_provider_url(self, region: str, url: str):\n return s3client.upload_file_from_url(url, self.provider_name() + \"/\" + region + \"/\" +\n datetime.datetime.utcnow().isoformat() + \".json\")", "def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")", "def upload_to_s3(file_from_machine, bucket, file_to_s3):\n s3.upload_file(file_from_machine, bucket, file_to_s3)\n print(file_to_s3, \" : is upoaded to s3\")", "def get_s3_filepath(image_id, prefix=\"\", filetype=\"png\"):\n bucket_name = os.environ[\"LABELED_BUCKET_NAME\"]\n key_name = \"{prefix}/{id}.{suffix}\".format(\n prefix=prefix, id=image_id, suffix=filetype)\n url = \"https://s3.amazonaws.com/{bucket}/{key}\".format(\n bucket=bucket_name, key=key_name)\n return url", "def image_upload(request, filename, format=None):\n file_obj = request.data['file']\n #filename = request.data['filename']\n #filename = request.FILES['filename'].name\n\n timestamp = datetime.now()\n extension = filename[-4:]\n newfilename = timestamp.strftime(\"%Y%m%d%H%M%S\") + extension\n savedir = 'img/' + newfilename\n contentType = filename[-3:]\n \n path = default_storage.save(savedir, ContentFile(file_obj.read()))\n success = True\n error = \"\"\n\n try:\n s3 = boto3.client('s3')\n #s3.upload_file(path, 'demo-poppag-s3-bucket-2020', savedir, ExtraArgs={'ContentType': \"image/\" + contentType, 'ACL': \"public-read\"})\n s3.upload_file(path, S3BUCKET, savedir, ExtraArgs={'ContentType': \"image/\" + contentType, 'ACL': \"public-read\"})\n\n print(\"Upload Successful\")\n success = True\n except FileNotFoundError:\n print(\"The file was not found\")\n error = \"This file was not found\"\n success = False\n except NoCredentialsError:\n error = \"Credentials not available\"\n success = False\n\n #path = default_storage.save('tmp/somename.png', ContentFile(request.FILES[\"filename\"]))\n #tmp_file = os.path.join(settings.MEDIA_ROOT, path)\n\n uploaded = \"https://demo-poppag-s3-bucket-2020.s3.us-west-1.amazonaws.com/\" + savedir\n\n return JsonResponse({ \"filename\" : uploaded, \"success\" : success, error : error})", "def upload_to_s3(self, file: str, force_upload: bool = False) -> str:\n if self.aws_access_key_id is None:\n raise Exception(\n 'To use `upload_to_s3` you need to pass '\n '`aws_access_key_id` and '\n 
'`aws_secret_access_key`'\n )\n\n filename = file.split('/')[-1]\n\n s3 = boto3.client(\n 's3',\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key\n )\n # Check if exists\n if not force_upload:\n try:\n session = boto3.Session(\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key\n )\n\n session.resource('s3').Object(self.bucket_name, filename).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != \"404\":\n raise e\n else:\n # The object does exist\n return filename\n\n # Progress bar\n size = os.stat(file).st_size\n progress_bar = self._progress(size)\n\n # Uploading file\n s3.upload_file(file, self.bucket_name, filename, Callback=progress_bar)\n\n return filename", "def make_s3(sitename):\n return s3.S3(sitename)", "def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))", "def write_to_s3(df, bucket, path):\n pass", "def _get_gcs_file_url(self, filepath: str) -> str:\n # Upload to GCS bucket with filepath\n # \"<entity>/<entity-id>/assets/<filepath>\".\n gcs_file_url = '%s/%s' % (self._assets_path, filepath)\n return gcs_file_url", "def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def project_uploader():\n if not current_app.config['S3_KEY']:\n return ''\n if len(request.files) == 0:\n return 'No files selected'\n img = request.files['file']\n if not img or img.filename == '':\n return 'No filename'\n ext = img.filename.split('.')[-1].lower()\n if ext not in ACCEPTED_TYPES:\n return 'Invalid format (allowed: %s)' % ','.join(ACCEPTED_TYPES)\n # generate a simpler filename\n keepcharacters = ('.', '_')\n safe_filename = img.filename.replace(' ', '_')\n safe_filename = \"\".join(\n c for c in safe_filename\n if c.isalnum() or c in keepcharacters).rstrip()\n if not safe_filename:\n safe_filename = \"\".join(random_password(8), '.', ext)\n # use random subfolder inside user id folder\n filename = '/'.join([\n str(current_user.id),\n random_password(24),\n safe_filename\n ])\n # with tempfile.TemporaryDirectory() as tmpdir:\n # img.save(path.join(tmpdir, filename))\n if 'S3_FOLDER' in current_app.config:\n s3_filepath = '/'.join([current_app.config['S3_FOLDER'], filename])\n else:\n s3_filepath = filename\n # print('Uploading to %s' % s3_filepath)\n if 'S3_ENDPOINT' in current_app.config:\n s3_obj = boto3.client(\n service_name='s3',\n endpoint_url=current_app.config['S3_ENDPOINT'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to endpoint %s' % current_app.config['S3_ENDPOINT'])\n else:\n s3_obj = boto3.client(\n service_name='s3',\n region_name=current_app.config['S3_REGION'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to region %s' % 
current_app.config['S3_REGION'])\n # Commence upload\n s3_obj.upload_fileobj(img,\n current_app.config['S3_BUCKET'],\n s3_filepath,\n ExtraArgs={'ContentType': img.content_type,\n 'ACL': 'public-read'}\n )\n return escape('/'.join([current_app.config['S3_HTTPS'], s3_filepath]))", "def get_blob_url(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n location = self.s3.generate_presigned_url(\n ClientMethod='get_object',\n ExpiresIn=36*60*60,\n Params={'Bucket': bucket_name, 'Key': key})\n return location", "def s3_etag(url, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_object = s3_resource.Object (bucket_name, s3_path)\n\treturn s3_object.e_tag", "def upload(self, path, data, headers={}):\n\n client = AsyncHTTPClient()\n method = 'PUT'\n url = self.generate_url(path)\n url_object = urlparse(url)\n params = {\n 'SignatureMethod': 'AWS4-HMAC-SHA256'\n }\n\n headers.update({\n 'Content-Length': str(len(data)),\n 'Content-Type': self._guess_mimetype(path),\n 'Date': self._rfc822_datetime(),\n 'Host': url_object.hostname,\n 'X-Amz-Content-sha256': hashlib.sha256(data).hexdigest(),\n })\n\n try:\n response = yield client.fetch(\n self.sign_request(\n url_object.hostname,\n url_object.path,\n params,\n headers,\n method,\n data\n ),\n method=method,\n body=data,\n connect_timeout=AWS_S3_CONNECT_TIMEOUT,\n request_timeout=AWS_S3_REQUEST_TIMEOUT,\n headers=headers\n )\n except HTTPError as error:\n log.error(error)\n if error.response:\n log.error(error.response.body)\n raise Return(None)\n\n raise Return(response)", "def get_presigned_url_for_download(file):\n url = S3_CLIENT.generate_presigned_url(\n ClientMethod='get_object',\n Params={\n 'Bucket': runtime_context.BUCKET_NAME,\n 'Key': file['id'],\n 'ResponseContentDisposition': 'attachment; filename=\"{}\"'.format(file['name']),\n 'ResponseContentType': file['type']\n },\n ExpiresIn=runtime_context.EXPIRATION\n )\n LOGGER.debug('Presigned URL generated. service=s3 method=get_object id={}'.format(file['id']))\n return url", "def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)", "def get_s3_file(self, no_copy=False):\n return self.get_file(uri_type=URI_S3, no_copy=no_copy)", "def store_to_s3():\n\n try:\n # establish aws/s3 connection\n s3 = boto3.client('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY\n )\n logger.info(\"S3 connection established!\")\n except Exception as e:\n logger.error('Fail to connect to aws s3. 
Please check your credentials!')\n logger.error(e)\n else:\n try:\n # upload local file to S3 bucket\n logger.info(\"Uploading {} to {} bucket as {}\".format(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename))\n s3.upload_file(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename)\n logger.info('File successfully uploaded to S3 bucket!')\n except FileNotFoundError:\n logger.error('File not found, pleas check the file path.')\n except Exception as e:\n logger.error(e)", "def create_presigned_url(bucket_name, object_name, expiration=3600):\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=expiration)\n except ClientError as e:\n print (e)\n return None\n\n # The response contains the presigned URL\n return response", "def upload_to_s3(file_path, config):\n logging.info(\"Uploading file to S3 bucket: %s\", config['s3_bucket_name'])\n s3 = boto3.resource('s3')\n s3_filename = config['s3_bucket_path'] + config['rendered_filename']\n s3.Bucket(config['s3_bucket_name']).upload_file(\n file_path, s3_filename, ExtraArgs={\n 'ContentType': 'text/html', 'ACL': 'public-read'})", "def url(self, bucket, path):\n custom_url = bucket.startswith('http://') or bucket.startswith('https://')\n\n if isinstance(path, list):\n # This is a list of key components that need to be quoted\n # and assembled.\n path = self.key_join(path, encode=custom_url)\n if isinstance(path, bytes):\n path = path.decode(\"utf-8\")\n if path.startswith('/'):\n path = path[1:]\n\n if custom_url:\n url = bucket\n\n if not url.endswith('/'):\n url += '/'\n\n return url + path\n else:\n url = self._generate_s3_url(bucket, path)\n\n return url", "def upload_object(object_location: ObjectLocation, stream: io.BytesIO) -> None:\n s3 = boto3.client(\"s3\")\n result = s3.upload_fileobj(stream, object_location.bucket.name, object_location.key)\n log.debug(f\"Result of upload to {object_location}: {result}\")", "def put(self, account=None, user=None, account_id=None):\n file = request.files.get('file')\n filename = f\"{account_id}/avatar.img\"\n\n engine = S3Engine()\n url = engine.put_object(filename, file.read())\n Account.update(vertex_id=account_id,\n validated_data={\"avatarLink\": url})\n\n return jsonify_response({\n \"id\": account.id,\n \"title\": account.title,\n \"avatarLink\": url\n })", "def createObject(bucket:str, object:str, region:str, path:Path) -> None:\n data = path.read_bytes()\n client = boto3.client('s3', region_name=region)\n client.upload_fileobj(\n data,\n bucket,\n object,\n #Callback=callback\n )", "def file_url(self, fname):\n gs_url = f\"{self.gs_base_url}/{fname}\"\n return f\"{gs_url}\"", "def gcloud_upload_file(file):\n if not file:\n return None\n\n public_url = storage.upload_file(\n file.read(),\n file.filename,\n file.content_type\n )\n\n current_app.logger.info(\n \"Uploaded file %s as %s.\", file.filename, public_url)\n\n return public_url", "def sync_to_bucket(s3_url,\n region='eu-west-1',\n profile_name=None):\n\n parsed_s3_url = urlparse.urlparse(s3_url);\n\n bucket_name = parsed_s3_url.hostname;\n key_prefix = parsed_s3_url.path;\n if key_prefix[0] == '/':\n key_prefix = key_prefix[1:]\n if key_prefix[-1] != '/':\n key_prefix = key_prefix + '/'\n\n def inner(fn_inner):\n \"\"\"\n Decorator function function sent in should be having signature\n func(None,None, XmlDoc) and should yield JSON document 
one for\n each file that should be persisted to S3\n \"\"\"\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler\n\n return inner", "def upload_to_bucket(blob_name, file, bucket_name):\n\n # Explicitly use service account credentials by specifying the private key\n # file.\n storage_client = storage.Client.from_service_account_json('creds.json')\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.upload_from_file(file)\n\n # returns a public url\n return blob.public_url", "def upload_object(self, file_path, destination, use_original_name=True, ExtraArgs=None):\n assert os.path.exists(file_path), \"File path does not exist {}\".format(file_path)\n bucket_name, save_path = self.split_name(destination)\n if use_original_name:\n save_path = os.path.join(save_path, os.path.basename(file_path))\n\n self.s3_client.upload_file(file_path, bucket_name, save_path, ExtraArgs=ExtraArgs)\n return os.path.join(bucket_name, save_path)", "def get_file_path(self, name):\n if self.folder.type != \"s3\":\n return super(NereidStaticFile, self).get_file_path(name)\n\n cloudfront = config.get('nereid_s3', 'cloudfront')\n if cloudfront:\n return '/'.join([cloudfront, self.s3_key])\n\n return \"https://s3.amazonaws.com/%s/%s\" % (\n config.get('nereid_s3', 'bucket'), self.s3_key\n )", "def create_presigned_url(bucket_name, object_name, expiration=3600):\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=expiration)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def _s3_origin(self):\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n s3_origin = pipeline_builder.add_stage('Amazon S3', type='origin')\n s3_origin.set_attributes(bucket=self.environments['aws'].s3_bucket_name,\n common_prefix='origin_data',\n prefix_pattern=f\"{DATASETS[self.dataset]['file_pattern']}\",\n data_format='DELIMITED',\n header_line='WITH_HEADER',\n 
delimiter_format_type='CUSTOM',\n delimiter_character=DATASETS[self.dataset]['delimiter'],\n number_of_threads=self.number_of_threads,\n max_batch_size_in_records=self.batch_size)\n return s3_origin, pipeline_builder", "def s3_resource(self):\n return boto3.resource('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def create_presigned_url(bucket_name, bucket_key, expiration=3600, signature_version=s3_signature['v4']):\n s3_client = boto3.client('s3',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n config=Config(signature_version=signature_version),\n region_name=AWS_DEFAULT_REGION\n )\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': bucket_key},\n ExpiresIn=expiration)\n print(s3_client.list_buckets()['Owner'])\n for key in s3_client.list_objects(Bucket=bucket_name, Prefix=bucket_key)['Contents']:\n print(key['Key'])\n except ClientError as e:\n logging.error(e)\n return None\n # The response contains the presigned URL\n return response", "def download_file_from_s3_public_bucket(bucket, object, output_file):\n botocore_config = Config(signature_version=UNSIGNED)\n s3_client = boto3.client(\"s3\", config=botocore_config)\n s3_client.download_file(bucket, object, output_file)", "def s3(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"s3\")", "def create_presigned_url(bucket_name, object_name, expiration=3600):\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url(\n 'get_object',\n Params={'Bucket': bucket_name, 'Key': object_name},\n ExpiresIn=expiration\n )\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def upload_to(instance, filename):\n return upload_image_path(filename, 'products')", "def s3_etag(url):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag", "def s3_etag(url):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag", "def create_presigned_url(bucket_name, object_name, expiration=3600):\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=expiration)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def type(self):\n return 's3_file'", "def create_presigned_url(bucket_name, object_name):\n\n logger = logging.getLogger(\"SimpleReplayLogger\")\n\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=604800)\n except ClientError as e:\n logger.error(f\"Unable to generate presigned url for object {object_name} in bucket {bucket_name}. 
{e}\")\n return None\n\n return response", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n uri = f\"gs://{bucket_name}/{destination_blob_name}\"\n\n return uri", "def create_presigned_url(s3_uri, expiration=86400):\n\n bucket_name, object_name = split_uri(s3_uri)\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client(\"s3\", config=botocore.config.Config(signature_version=\"s3v4\"))\n try:\n response = s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": bucket_name, \"Key\": object_name},\n ExpiresIn=expiration,\n )\n except botocore.exceptions.ClientError as err:\n # Soft failure.\n logger.error(\"failed to generate presigned url: %s\", err)\n return None\n\n # The response contains the presigned URL\n return response", "def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)", "def upload_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n file_content = open(local_path, 'rb')\n lwr_AIES.put_object(Key=s3_path, Body=file_content)", "def save_to_s3(filename, contents):\n s3 = boto3.resource('s3')\n obj = s3.Object(BUCKET_NAME, S3_PATH.format(filename))\n obj.put(Body=contents)", "def save_uploaded_file(uploaded_file_object):\n client = _get_client()\n key = client.key(_FILE_ENTITY, uploaded_file_object.filename)\n entity = datastore.Entity(key)\n entity['url'] = uploaded_file_object.url\n client.put(entity)", "def url(self, url):\n return self.presigned_url(url)", "def upload_blob(fileName, source_file_name):\n # source_file_name = \"local/path/to/file\"\n # fileName = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(\"images/%s\" % fileName)\n\n blob.upload_from_filename(source_file_name)\n blob.make_public()\n fileUrl = blob.public_url\n print(\n \"File {} uploaded.\".format(fileName),\n \"Blob {} is publicly accessible at {}\".format(\n blob.name, fileUrl)\n )\n return fileUrl", "def image_uri(path: str) -> str:\n return f\"{account_id()}.dkr.ecr.{default_session().region_name}.amazonaws.com/{path}\"", "def s3_to_local(path: str):\n out_path = sriracha.remote.s3_to_local(\n path, download_mode=DownloadMode.SIZE_AND_TIMESTAMP\n )\n click.echo(out_path)", "def gcs_url(keys, path, verb='GET', expiration_secs=1000, content_type=''):\n expiration = int(time.time() + expiration_secs)\n signed_url = sign_url(path, verb=verb, expiration = expiration,\n content_type=content_type,\n account_email=keys['client_email'],\n keytext=keys['private_key']\n )\n return signed_url", "def s3(self) -> Optional['outputs.DataRepositoryAssociationS3']:\n return pulumi.get(self, \"s3\")", "def publish(self):\n 
#vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()", "def do_s3_media_url(parser, token, static=False):\n\n split_token = token.split_contents()\n vars = []\n as_var = False\n for k, v in enumerate(split_token[1:]):\n if v == 'as':\n try:\n while len(vars) < 1:\n vars.append(None)\n vars.append(split_token[k+2])\n as_var = True\n except IndexError:\n raise template.TemplateSyntaxError(\n \"%r tag requires a variable name to attach to\" \\\n % split_token[0]\n )\n break\n else:\n vars.append(v)\n\n if (not as_var and len(vars) not in (1,)) \\\n or (as_var and len(vars) not in (2,)):\n raise template.TemplateSyntaxError(\n \"%r tag requires a path or url\" \\\n % token.contents.split()[0]\n )\n\n return S3MediaURLNode(static, *vars)", "def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass", "def upload_from_file(self, file_obj, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_file(file_obj, **keyword_args)\n print(f\"Upload object {name_on_storage}\")", "def presign_url(self, endpoint_url, s3_url, timeout_hours=24, stance='download', n_parts=1):\n bucket_name, object_name = self._prep_presign(endpoint_url, s3_url)\n try:\n if stance == 'download':\n response = self.s3.generate_presigned_url(\n 'get_object',\n Params={'Bucket': bucket_name, 'Key': object_name},\n ExpiresIn=(timeout_hours * 60 * 60)\n )\n return response # The response contains the presigned URL\n elif stance == 'upload':\n response = self.s3.generate_presigned_post(\n bucket_name,\n object_name,\n ExpiresIn=(timeout_hours * 60 * 60)\n )\n return response\n elif stance == 'upload-multipart':\n s3util = S3MultipartUploadUtil(self, object_name)\n urls = [\n s3util.create_presigned_url(timeout_hours=timeout_hours)\n for _ in range(n_parts)\n ]\n return {\n 'urls': urls,\n 'upload_id': s3util.upload_id,\n }\n else:\n assert False, f'Stance \"{stance}\" is invlaid. 
Must be one of: \"upload\", \"download\", \"upload-multipart'\n except ClientError:\n logger.exception(\n 'create_presigned_url_exception',\n s3_url=s3_url,\n endpoint_url=endpoint_url,\n bucket=bucket_name,\n timeout_hours=timeout_hours,\n )\n return None", "def action(self):\n return blobstore.create_upload_url(self.upload_url)", "def create_presigned_url_expanded(objName):\n\n # Generate a presigned URL for the S3 client method\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={\n 'Bucket': 'ece1779-a3-bucket',\n 'Key': objName,\n },\n ExpiresIn=30)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def create_presigned_url(object_name, bucket_name='oortcloud-test1', expiration=3600):\n\n # Generate a presigned URL for the S3 object\n sess = boto3.session.Session()\n\n s3_con_cli = sess.client(service_name='s3', region_name='eu-west-2')\n try:\n response = s3_con_cli.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': object_name},\n ExpiresIn=expiration)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def url(self):\n return self.get_upload_set().url(self.filename)", "def s3_location(self, value):\n info = urlparse(value)\n if info.scheme != \"s3\":\n raise ValueError(\"S3 location must be a valid s3 url\\tgot={0}\".format(value))\n\n bucket = info.netloc\n if not bucket:\n raise ValueError(\"S3 location must be a valid s3 url\\tgot={0}\".format(value))\n\n key = info.path\n return S3Location(bucket, key, value)", "def uploaded_file(filename):\n if app.config.get('UPLOAD_SCREENSHOTS_TO_S3', False):\n import boto\n from flask import redirect\n conn = boto.connect_s3()\n url = conn.generate_url(\n expires_in=long(60*60*2), # 2 hour expiry\n method='GET',\n bucket=app.config['S3_BUCKET'],\n key='{}/{}'.format(\n app.config.get('S3_FILES_PREFIX', 'sleepypuppy'),\n filename\n ),\n query_auth=True\n )\n url = _correct_s3_url(url)\n return redirect(url, 302)\n else:\n return send_from_directory(\n app.config['UPLOAD_FOLDER'],\n filename\n )", "def uploaded_file(filename):\n if app.config.get('UPLOAD_SCREENSHOTS_TO_S3', False):\n import boto\n from flask import redirect\n conn = boto.connect_s3()\n url = conn.generate_url(\n expires_in=long(60 * 60 * 2), # 2 hour expiry\n method='GET',\n bucket=app.config['S3_BUCKET'],\n key='{}/{}'.format(\n app.config.get('S3_FILES_PREFIX', 'sleepypuppy'),\n filename\n ),\n query_auth=True\n )\n url = _correct_s3_url(url)\n return redirect(url, 302)\n else:\n return send_from_directory(\n app.config['UPLOAD_FOLDER'],\n filename\n )", "def generate_aws_presigned_url_for_part(key, uploadId, partNumber, expires_in):\n try:\n bucket = flask.current_app.config[\"DATA_UPLOAD_BUCKET\"]\n except KeyError:\n raise InternalError(\n \"fence not configured with data upload bucket; can't create signed URL\"\n )\n s3_url = \"s3://{}/{}\".format(bucket, key)\n return S3IndexedFileLocation(s3_url).generate_presigned_url_for_part_upload(\n uploadId, partNumber, expires_in\n )", "def upload_example(request, object_id):\n \n example = get_object_or_404(Example, id=object_id)\n\n #\n # Create an options dictionary and pass it to uploadify_s3.UploadifyS3()\n # to set Uploadify options. 
See http://www.uploadify.com/documentation/.\n #\n # These options override any set in your project settings file.\n #\n # Here we specify the name of our JavaScript onComplete event handler.\n # See /media/js/uploadify_event_handlers.js.\n #\n\n options={\n 'onComplete' : 'uploadifyOnComplete',\n }\n\n #\n # The key_pattern set here will be sent to S3 as the 'key' form field\n # below. You can use it to set the key (e.g. name) of your uploaded objects. \n #\n \n key_pattern = 'example-%s/${filename}' % object_id\n \n #\n # Create a post_data dictionary and pass it to uploadify_s3.UploadifyS3()\n # to set any desired S3 POST variables.\n #\n # See:\n # http://docs.amazonwebservices.com/AmazonS3/latest/index.html?UsingHTTPPOST.html\n #\n # 'key' is the only required field that is not automatically set by DUS3. It\n # may be set here in the view or by setting the AWS_DEFAULT_KEY_PATTERN in\n # your project settings.\n #\n # Note: Some reports indicate that Flash/Uploadify has problems with HTTP \n # responses with an empty body. To avoid this, set a success_action_status\n # of 201, which forces S3 to return an XML document.\n #\n \n post_data={\n 'key': key_pattern,\n 'success_action_status': \"201\",\n }\n\n #\n # S3 uses conditions to validate the upload data. DUS3 automatically constructs\n # and includes conditions for most of the elements that will be sent to S3, but you \n # need to pass in conditions for:\n # - 'key', whose value changes at upload time. Note that the condition's value\n # must correspond to the key pattern set above.\n # - any extra elements set at upload time\n #\n # See the DUS3 README for more information on the conditions mapping:\n # https://github.com/sbc/django-uploadify-s3\n #\n\n conditions={\n 'key': {'op': 'starts-with', 'value': 'example-%s/' % object_id},\n }\n\n #\n # Initialize UploadifyS3 and call get_options_json() to get the Uploadify\n # JSON payload. \n #\n \n uploadify_options = uploadify_s3.UploadifyS3(\n uploadify_options=options,\n post_data=post_data, \n conditions=conditions\n ).get_options_json()\n\n #\n # Pass the Uploadify JSON payload to the file_upload template as extra_context.\n # \n \n return direct_to_template(request, 'examples/example_file_upload.html', extra_context={ 'example': example, 'uploadify_options': uploadify_options })", "def s3_server(request):\n return _s3_server(request)" ]
[ "0.65627086", "0.6511997", "0.64451045", "0.63643384", "0.61173415", "0.6040773", "0.6031718", "0.60039645", "0.5967601", "0.5940069", "0.5857561", "0.58238834", "0.5796205", "0.574195", "0.57342345", "0.57331437", "0.5722847", "0.57148886", "0.57147944", "0.5705097", "0.56549144", "0.56406605", "0.563439", "0.5629561", "0.56099606", "0.5603903", "0.55994695", "0.5590016", "0.55867386", "0.5584057", "0.55780596", "0.5575477", "0.5557533", "0.554377", "0.55392116", "0.5531935", "0.55263096", "0.55259955", "0.55206525", "0.5510222", "0.55091125", "0.5504084", "0.54386216", "0.54345983", "0.5429578", "0.54293853", "0.54216516", "0.5417827", "0.5397067", "0.5395787", "0.53945464", "0.53786194", "0.5375195", "0.5373932", "0.5370156", "0.5367167", "0.53621876", "0.5342214", "0.53405184", "0.53382623", "0.5336416", "0.53308374", "0.53226304", "0.5313699", "0.5307767", "0.5303802", "0.530271", "0.529749", "0.5296964", "0.5296964", "0.52956367", "0.52860045", "0.5285865", "0.52732426", "0.5260543", "0.5249367", "0.5246524", "0.5246495", "0.5245737", "0.5240614", "0.52346677", "0.52279645", "0.5224991", "0.52215135", "0.52207196", "0.5214877", "0.5214306", "0.5213549", "0.5210055", "0.52086824", "0.5206874", "0.5203439", "0.5192046", "0.51892334", "0.5183147", "0.51721567", "0.5164003", "0.5157826", "0.5152527", "0.5149802" ]
0.7696398
0
Saves an XML string as a unique (checksummed) file in S3
And returns the URL of the file
Checks for duplicates along the way using sha1 to find collisions
def save_svg(xml_string, checksum=None):
    if checksum is None:
        checksum = get_checksum(xml_string)  # Get checksum of this file
    existing_url = is_duplicate_checksum(checksum)  # Make sure it's unique
    if existing_url is not None:
        # We've generated this file before.
        logger.info('Duplicate detected for %s' % checksum)
        return existing_url
    # If dupe_check has a value, it's a URL to an existing (duplicate) file.
    # Usually, we've already checked for a duplicate - the above logic is just for cases
    # where we need to generate the checksum on the backend
    filename = get_filename(checksum)
    url = upload_svg(filename, xml_string)
    return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_duplicate_checksum(checksum):\n s3 = boto3.client('s3')\n response = s3.list_objects_v2(\n Bucket=BUCKET,\n EncodingType='url',\n Prefix=checksum\n )\n\n if response['KeyCount'] > 0 and len(response['Contents']) > 0:\n return 'https://s3.amazonaws.com/%s/%s' % (BUCKET, response['Contents'][0]['Key'])\n\n return None", "def upload_svg(filename, xml_string):\n s3 = boto3.client('s3')\n response = s3.put_object(\n ACL='public-read',\n Body=xml_string,\n Bucket=BUCKET,\n Key=filename,\n StorageClass='REDUCED_REDUNDANCY',\n )\n\n return 'https://s3.amazonaws.com/%s/%s' % (BUCKET, filename)", "def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file", "def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)", "def test_parse_url(self):\n filename = 'demo-file.tar.gz'\n backend = BackendS3(**self.config)\n pb = PathBuilder('123456')\n base_url = backend.get_url()\n id = utils.generate_id(filename)\n parts = backend.id_to_path(id)\n path = '/'.join(parts)\n object_url = base_url + '/' + path + '/'\n original = object_url + filename\n crop_filename = pb.get_auto_crop_filename(id, '100x100', 'fit', 'jpg')\n resize = object_url + crop_filename\n result1 = backend.parse_url(original)\n result2 = backend.parse_url(resize)\n self.assertEquals(id, result1[0])\n self.assertEquals(filename, result1[1])\n 
self.assertEquals(id, result2[0])\n self.assertEquals(crop_filename, result2[1])", "def sync_to_bucket(s3_url,\n region='eu-west-1',\n profile_name=None):\n\n parsed_s3_url = urlparse.urlparse(s3_url);\n\n bucket_name = parsed_s3_url.hostname;\n key_prefix = parsed_s3_url.path;\n if key_prefix[0] == '/':\n key_prefix = key_prefix[1:]\n if key_prefix[-1] != '/':\n key_prefix = key_prefix + '/'\n\n def inner(fn_inner):\n \"\"\"\n Decorator function function sent in should be having signature\n func(None,None, XmlDoc) and should yield JSON document one for\n each file that should be persisted to S3\n \"\"\"\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler\n\n return inner", "def xml_hash(xml):\n root = defusedxml.lxml.fromstring(xml)\n nodes = [unicode(node.tag) for node in root.iter()]\n return hashlib.sha256((u''.join(nodes)).encode('utf8')).hexdigest()", "def test_same_sha(self):\n self.create_archive(fields={}, files={\"foo\": \"bar\"})\n file_ = File.objects.create()\n file_.putfile(BytesIO(b\"bar\"))\n self.create_release_file(file=file_)\n\n index = read_artifact_index(self.release, None)\n assert file_.checksum == index[\"files\"][\"fake://foo\"][\"sha1\"]", "def _check_final_md5(self, key, file_name):\r\n fp = open(file_name, 'r')\r\n if key.bucket.connection.debug >= 1:\r\n print 'Checking md5 against etag.'\r\n hex_md5 = key.compute_md5(fp)[0]\r\n if hex_md5 != key.etag.strip('\"\\''):\r\n file_name = fp.name\r\n fp.close()\r\n os.unlink(file_name)\r\n raise ResumableDownloadException(\r\n 'File changed during download: md5 signature doesn\\'t match '\r\n 'etag (incorrect downloaded file deleted)',\r\n ResumableTransferDisposition.ABORT)", "def downloadfile(self):\n req = requests.get(self.url, stream=True)\n mdsha256 = hashlib.sha256()\n with gzip.open(self.file_path, \"wb\") as gfile:\n for line in req.iter_lines():\n if line:\n gfile.write(line + b\"\\n\")\n mdsha256.update(line + b\"\\n\")\n\n with open(self.sha_file_name, \"wb\") as sfile:\n sfile.write(mdsha256.digest())\n\n sha256 = mdsha256.digest()\n if self.sha256 != sha256:\n self.sha256 = sha256\n print(\"File 
updated!\")\n else:\n print(\"File not updated!\")", "def download(self, key_name):\n #Flat structure for now\n fn = basename(key_name)\n\n key = self.bucket.get_key(key_name)\n md5 = key.etag[1 :-1]\n\n local_md5 = hashfile(fn, hashlib.md5())\n\n if not local_md5 == md5:\n key.get_contents_to_filename(fn)", "def __init__(self, url, dest_path):\n self.url = url\n self.dest_path = dest_path\n self.file_name = self.url.split('/')[-1]\n self.file_path = f\"{self.dest_path}/{self.file_name}.gz\"\n self.sha_file_name = f\"{self.dest_path}/.{self.file_name}.sha256\"\n print(self.sha_file_name)\n self.sha256 = \"\"\n if not os.path.exists(self.dest_path):\n os.makedirs(self.dest_path)\n else:\n if os.path.exists(self.sha_file_name):\n with open(self.sha_file_name, 'rb') as shafile:\n self.sha256 = shafile.read()", "def get_content_sha1(self):", "def url_for(filename):\n return \"{}{}\".format(S3_LOCATION, filename)", "def save_previously_valid_urls(previously_valid_urls):\n\n LOGGER.info(f\"Writing previously_valid_urls to bucket {BUCKET} with key {PREVIOUSLY_VALID_URLS_S3_KEY}.\") \n save_json(BUCKET, PREVIOUSLY_VALID_URLS_S3_KEY, previously_valid_urls)", "def encode(self, longUrl: str) -> str:\n while True:\n result = hashlib.sha256(longUrl.encode()).hexdigest()\n shortUrl = result[:7]\n if longUrl not in self.bucket.get(shortUrl):\n self.bucket.put(shortUrl, longUrl)\n break \n return shortUrl", "def test_upload_hash_generation(self):\n cache = DummyCache()\n pkg = cache.upload(\"a-1.tar.gz\", BytesIO(b\"test1234\"), \"a\")\n self.assertEqual(\n pkg.data[\"hash_sha256\"],\n \"937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244\",\n )\n self.assertEqual(pkg.data[\"hash_md5\"], \"16d7a4fca7442dda3ad93c9a726597e4\")", "def s3_md5(s3key, blocksize=65536):\n return s3key.etag.strip('\"').strip(\"'\")", "def xml_get_sha512(xml, secret):\n xml_string = xml_to_string(xml, encode_base64=False) + secret\n return hashlib.sha512(xml_string).hexdigest()", "def from_string(the_string):\n hash = hashlib.md5()\n\n buf = StringIO.StringIO(the_string)\n for block in iter(lambda: buf.read(AWS_UPLOAD_PART_SIZE), \"\"):\n hash.update(block)\n return hash.hexdigest()", "def checksum(self, filepath) -> str:\n if os.path.exists(filepath):\n hash_md5 = md5()\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return urlsafe_b64encode(hash_md5.digest()).decode('utf-8')\n\n return \"\"", "def apkdownloadmirror_get_sha1_sum(soup, **_):\n return soup.find(text=re.compile(r'File APK Sha1:')).next.text.strip()", "def sha1sum(filename):\n if not os.path.isfile(filename):\n return ''\n hasher = hashlib.sha1()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def _get_sha1(file_descriptor):\n sha1 = hashlib.sha1()\n for block in iter(partial(file_descriptor.read, BLOCK_SIZE), ''):\n sha1.update(block)\n file_descriptor.seek(0)\n return sha1.hexdigest()", "def test_amazon_s3_store_filename(self):\n config = Config()\n metadata_bucket = config.config.get(\"metadata\", \"bucket\")\n data_bucket = config.config.get(\"data\", \"bucket\")\n metadata_provider = amazon.S3(config, metadata_bucket).connect()\n provider = amazon.S3(config, data_bucket).connect()\n key = checksum_file(\"LICENSE\")\n metadata_provider.store(key, \"LICENSE METADATA\")\n provider.store_from_filename(key, \"LICENSE\")\n t = 
tempfile.NamedTemporaryFile()\n metadata = metadata_provider.retrieve(key)\n provider.retrieve_to_filename(key, t.name)\n self.assertEqual(file(\"LICENSE\").read(), file(t.name).read())\n self.assertEqual(\"LICENSE METADATA\", metadata)\n metadata_provider.delete(key)\n provider.delete(key)\n metadata_provider.disconnect()\n provider.disconnect()", "def checksumFile(filename):\n return md5File(filename)", "def checkMD5(self, xml_string):\n if isinstance(xml_string, unicode):\n xml_string = xml_string.encode('utf-8')\n return md5(xml_string).hexdigest() == self.getContentMd5()", "def rss_md5(string):\r\n if not isinstance(string, basestring):\r\n try: string = string.decode('utf8','replace')\r\n except: pass\r\n md5 = hashlib.md5()\r\n md5.update(string.encode('utf8'))\r\n return md5.hexdigest()", "def sha_hash(file_name: str):\n BLOCKSIZE = 65536\n line = '' # format one line for hash\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE) # read each line of doc\n while len(buf) > 0:\n line += buf.decode('utf-8')\n buf = afile.read(BLOCKSIZE)\n\n hex = \"0x\" + sha1(line.encode()) # create sha1 hash\n return int(hex, 0)", "def GetFileSha1(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha1=True)['sha1'])", "def upload_file_to_s3(self, file_data):\r\n\r\n file_key = file_data.name + datetime.now(UTC).strftime(\r\n xqueue_interface.dateformat\r\n )\r\n\r\n file_data.seek(0)\r\n s3_public_url = upload_to_s3(\r\n file_data, file_key, self.s3_interface\r\n )\r\n\r\n return s3_public_url", "def sha1(fname):\n fh = open(fname, 'rb')\n sha1 = hashlib.sha1()\n block = fh.read(2 ** 16)\n while len(block) > 0:\n sha1.update(block)\n block = fh.read(2 ** 16)\n\n return sha1.hexdigest()", "def hashfile(filename):\n BLOCKSIZE = 65536\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n sha1.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(sha1.hexdigest())", "def _sha1(self):\n return hashlib.sha1(self._blob).hexdigest()", "def get_sha1(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha1_from_stream(i)", "def get_filename(checksum):\n return '%s.svg' % checksum", "def delete_duplicate_files(conn, va_bucket):\n c = conn.cursor()\n query = \"\"\"select id, key from aws_files where action = 'delete'\n and action_completed_at is null\"\"\"\n c.execute(query)\n for row in c.fetchall():\n pk = row[0]\n uuid = row[1]\n s3_url = S3_PREFIX + uuid\n sys.stderr.write(\"Removing {0}\\n\".format(uuid))\n delete_file(va_bucket, uuid)\n mark_as_completed(conn, pk)\n c.close()", "def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()", "def s3_etag(url, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_object = s3_resource.Object (bucket_name, s3_path)\n\treturn s3_object.e_tag", "def test_convert_id_to_path(self):\n backend = BackendS3(**self.config)\n filename = 'demo-test.tar.gz'\n id = utils.generate_id(filename)\n parts = backend.id_to_path(id)\n self.assertEquals(6, len(parts))\n self.assertEquals(filename, parts[5])", "def calchash(filename):\n sha = hashlib.sha1()\n with open(filename, 'rb') as f:\n sha.update(f.read())\n return sha", "def upload_file_by_url(s3_file_name, filename):\n full_path = os.path.join(CONFIG_BROKER['path'], 
\"tests\", \"integration\", \"data\", filename)\n\n if CONFIG_BROKER['local']:\n # If not using AWS, put file submission in location\n # specified by the config file\n broker_file_path = CONFIG_BROKER['broker_files']\n copy(full_path, broker_file_path)\n submitted_file = os.path.join(broker_file_path, filename)\n return {'bytesWritten': os.path.getsize(submitted_file), 's3FileName': full_path}\n else:\n # Use boto to put files on S3\n s3conn = boto.s3.connect_to_region(CONFIG_BROKER[\"aws_region\"])\n bucket_name = CONFIG_BROKER['aws_bucket']\n key = Key(s3conn.get_bucket(bucket_name))\n key.key = s3_file_name\n bytes_written = key.set_contents_from_filename(full_path)\n return {'bytesWritten': bytes_written, 's3FileName': s3_file_name}", "def sha1sum(filename):\n with open(filename, mode='rb') as f:\n d = hashlib.sha1()\n for buf in iter(functools.partial(f.read, 1024*100), b''):\n d.update(buf)\n return d.hexdigest()", "def _save_to_manifest_bucket(config: dict, standard_json: dict) -> bool:\n if \"id\" in standard_json:\n key_name = standard_json[\"id\"] + \"/standard/index.json\"\n success_flag = _save_json_to_s3(config['manifest-server-bucket'], key_name, standard_json)\n return success_flag", "def s3_etag(url):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag", "def s3_etag(url):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag", "def get_xml_file_url(self, elife_id):\n\t\txml_url = \"https://s3.amazonaws.com/\" + self.settings.cdn_bucket + \"/elife-articles/\" + elife_id + \"/elife\" + elife_id + \".xml\"\n\t\t\n\t\treturn xml_url", "def download(url: str,\n path: Optional[str] = None,\n overwrite: Optional[bool] = False,\n sha1_hash: Optional[str] = None,\n retries: Optional[int] = 5,\n verify_ssl: Optional[bool] = True,\n anonymous_credential: Optional[bool] = True) -> str:\n is_s3 = url.startswith(S3_PREFIX)\n if is_s3:\n boto3, botocore = try_import_boto3()\n s3 = boto3.resource('s3')\n if boto3.session.Session().get_credentials() is None or anonymous_credential:\n from botocore.handlers import disable_signing\n s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)\n components = url[len(S3_PREFIX):].split('/')\n if len(components) < 2:\n raise ValueError('Invalid S3 url. Received url={}'.format(url))\n s3_bucket_name = components[0]\n s3_key = '/'.join(components[1:])\n if path is None:\n fname = url.split('/')[-1]\n # Empty filenames are invalid\n assert fname, 'Can\\'t construct file-name from this URL. ' \\\n 'Please set the `path` option manually.'\n else:\n path = os.path.expanduser(path)\n if os.path.isdir(path):\n fname = os.path.join(path, url.split('/')[-1])\n else:\n fname = path\n assert retries >= 0, \"Number of retries should be at least 0, currently it's {}\".format(\n retries)\n\n if not verify_ssl:\n warnings.warn(\n 'Unverified HTTPS request is being made (verify_ssl=False). 
'\n 'Adding certificate verification is strongly advised.')\n\n if overwrite or not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):\n dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))\n if not os.path.exists(dirname):\n os.makedirs(dirname, exist_ok=True)\n while retries + 1 > 0:\n # Disable pyling too broad Exception\n # pylint: disable=W0703\n try:\n print('Downloading {} from {}...'.format(fname, url))\n if is_s3:\n response = s3.meta.client.head_object(Bucket=s3_bucket_name,\n Key=s3_key)\n total_size = int(response.get('ContentLength', 0))\n random_uuid = str(uuid.uuid4())\n tmp_path = '{}.{}'.format(fname, random_uuid)\n if tqdm is not None:\n def hook(t_obj):\n def inner(bytes_amount):\n t_obj.update(bytes_amount)\n return inner\n with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True) as t:\n s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path,\n Callback=hook(t))\n else:\n s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path)\n else:\n r = requests.get(url, stream=True, verify=verify_ssl)\n if r.status_code != 200:\n raise RuntimeError('Failed downloading url {}'.format(url))\n # create uuid for temporary files\n random_uuid = str(uuid.uuid4())\n total_size = int(r.headers.get('content-length', 0))\n chunk_size = 1024\n if tqdm is not None:\n t = tqdm.tqdm(total=total_size, unit='iB', unit_scale=True)\n with open('{}.{}'.format(fname, random_uuid), 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk: # filter out keep-alive new chunks\n if tqdm is not None:\n t.update(len(chunk))\n f.write(chunk)\n if tqdm is not None:\n t.close()\n # if the target file exists(created by other processes)\n # and have the same hash with target file\n # delete the temporary file\n if not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):\n # atomic operation in the same file system\n replace_file('{}.{}'.format(fname, random_uuid), fname)\n else:\n try:\n os.remove('{}.{}'.format(fname, random_uuid))\n except OSError:\n pass\n finally:\n warnings.warn(\n 'File {} exists in file system so the downloaded file is deleted'.format(fname))\n if sha1_hash and not sha1sum(fname) == sha1_hash:\n raise UserWarning(\n 'File {} is downloaded but the content hash does not match.'\n ' The repo may be outdated or download may be incomplete. '\n 'If the \"repo_url\" is overridden, consider switching to '\n 'the default repo.'.format(fname))\n break\n except Exception as e:\n retries -= 1\n if retries <= 0:\n raise e\n\n print('download failed due to {}, retrying, {} attempt{} left'\n .format(repr(e), retries, 's' if retries > 1 else ''))\n\n return fname", "def wp_fp(self,url):\r\n\t\ttree = xml.etree.ElementTree.parse(\"doc/wp_versions.xml\")\r\n\t\tp = tree.findall(\"file\")\r\n\t\t#p2 = tree.findall(\"file/hash\")\r\n\t\t#p3 = tree.findall(\"file/hash/version\")\r\n\t\tfor elem in p:\r\n\t\t\ts = elem.getchildren()\r\n\t\t\tsrc = elem.attrib[\"src\"]\r\n\t\t\tcontent = self.get_cont(url+\"/\"+src)\r\n\t\t\tmd5p = md5.new(content).hexdigest()\r\n\t\t\t#print src\r\n\t\t\tfor ele in s:\r\n\t\t\t\tmd5c = ele.attrib[\"md5\"]\r\n\t\t\t\t#print \"[!] 
comparing \"+md5c+\" hash for \"+src+\" : \"+md5p\r\n\t\t\t\tif (md5c == md5p):\r\n\t\t\t\t\tr = ele.getchildren()\r\n\t\t\t\t\treturn r[0].text\r\n\t\t\t#print md5\r", "def hash_file(file_like_object):\n checksum = hashlib.sha1()\n for chunk in iter(lambda: file_like_object.read(32768), b''):\n encoded_chunk = (chunk.encode(encoding='utf-8')\n if isinstance(chunk, six.string_types) else chunk)\n checksum.update(encoded_chunk)\n return checksum.hexdigest()", "def download_reference(s3_path, working_dir):\n\n reference_folder = os.path.join(working_dir, 'reference')\n\n try:\n os.mkdir(reference_folder)\n except Exception as e:\n pass\n\n download_folder(s3_path, reference_folder)\n\n # Update sorted reference\n update_sorted_reference(reference_folder)\n\n return reference_folder", "def upload(self, filename: str, content: bytes) -> str:\n f_hash = hashlib.md5(content).hexdigest()\n dst_path = datetime.today().strftime(\"%Y/%m/%d\")\n\n resp = self.client.put_object(\n Bucket=self.bucket_name,\n Key=f'{self.folder_name}/{dst_path}/{filename}',\n Body=content,\n ContentLength=len(content),\n )\n\n info = self.client.head_object(\n Bucket=self.bucket_name,\n Key=f'{self.folder_name}/{dst_path}/{filename}'\n )\n\n if resp.get('ETag', '') != f'\"{f_hash}\"' or info.get('ContentLength', 0) == 0:\n raise RuntimeError(f\"File \\\"{filename}\\\" wasn't uploaded\")\n\n return f'{self.endpoint_url}/{self.bucket_name}/{self.folder_name}/{dst_path}/{filename}'", "def get_hash(content):\n return hashlib.sha1(content).hexdigest()", "def put(self, path: str, filename: str) -> None:\n\n payload_hash, content_md5, length = _hash(path)\n\n now = datetime.datetime.utcnow()\n timestamp = now.strftime('%Y%m%dT%H%M%SZ')\n headers = [\n ('Connection', 'keep-alive'),\n ('Content-Length', str(length)),\n ('Content-MD5', content_md5),\n ('Content-Type', 'application/zip'),\n ('Date', now.strftime('%a, %d %b %Y %H:%M:%S GMT')),\n ('Host', '%s.s3.amazonaws.com' % self.bucket),\n ('x-amz-content-sha256', payload_hash),\n ('x-amz-date', timestamp),\n ]\n signed_headers = ';'.join(header[0].lower() for header in headers)\n canonical_request = 'PUT\\n%s\\n\\n%s\\n\\n%s\\n%s' % (filename, '\\n'.join(\n ('%s:%s' % (header[0].lower(), header[1])\n for header in headers)), signed_headers, payload_hash)\n logging.debug('canonical request %r',\n canonical_request.encode('utf-8'))\n string_to_sign = 'AWS4-HMAC-SHA256\\n%s\\n%s\\n%s' % (\n timestamp, self.scope,\n hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())\n logging.debug('string to sign %r', string_to_sign.encode('utf-8'))\n\n signature = hmac.new(self.signing_key,\n string_to_sign.encode('utf-8'),\n digestmod='sha256').hexdigest()\n headers.append((\n 'Authorization',\n 'AWS4-HMAC-SHA256 Credential=%s/%s,SignedHeaders=%s,Signature=%s' %\n (self.aws_access_key, self.scope, signed_headers, signature)))\n with open(path, 'rb') as file_stream:\n if not self.conn:\n self.conn = http.client.HTTPSConnection('%s.s3.amazonaws.com' %\n self.bucket)\n try:\n self.conn.request('PUT',\n filename,\n file_stream,\n headers=dict(headers))\n res = self.conn.getresponse()\n payload = res.read()\n except (http.client.BadStatusLine, http.client.ResponseNotReady,\n http.client.CannotSendRequest):\n self.conn.close()\n raise\n if res.status != 200:\n raise Exception(payload.decode('utf-8'))", "def singlePartUpload(self, eachfiledic):\n self.CalculateMd5OfEachFile(eachfiledic)\n fileobj = open(eachfiledic[\"filepath\"], 'rb')\n toReturn = False\n try:\n response = 
client.put_object(Body=fileobj, Bucket=self.bucketName,\n Key=eachfiledic[\"filename\"], ContentMD5=eachfiledic[\"md5\"])\n s3Log.info (\"{} got uploaded on s3 bucket = {}\\n\".format(eachfiledic[\"filepath\"], self.bucketName))\n toReturn = True\n except (ClientError, boto3.exceptions.S3UploadFailedError) as e:\n s3Log.error (\"FAILED TO UPLOAD file:{}\\n\".format(eachfiledic[\"filename\"]) )\n if \"Error\" in e.response:\n s3Log.error(e.response[\"Error\"])\n\n fileobj.close()\n return toReturn", "def get_url_data(url):\n\n # Return data while saving the data in a file \n # which is a hash of the URL\n data = requests.get(url).content\n # Save it in a filename\n filename = hashlib.md5(url.encode(\"utf8\")).hexdigest()\n# open(filename, 'w').write(data)\n with open(filename, \"w\") as fileObj:\n fileObj.write(data.decode(\"utf8\"))\n return data", "def test_upload_prepend_hash(self):\n self.storage.prepend_hash = True\n package = make_package()\n data = StringIO()\n self.storage.upload(package, data)\n key = list(self.bucket.list())[0]\n\n pattern = r'^[0-9a-f]{4}/%s/%s$' % (re.escape(package.name),\n re.escape(package.filename))\n match = re.match(pattern, key.key)\n self.assertIsNotNone(match)", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def uri_to_file(uri):\n if not uri:\n return\n\n header, data = uri.split(',')\n md5_hash = hashlib.md5(data).hexdigest()\n with open(\"static/images/%s.jpg\" % md5_hash, 'wb') as f:\n f.write(data.decode('base64'))\n return md5_hash", "def upload_and_get_url(file_to_upload, extension):\n\timport string, random\n\tfilename = binascii.b2a_hex(os.urandom(30))+extension\n\tput = client.put_file(filename, file_to_upload) \n\tshare = client.share(filename, short_url=False)\n\treturn {'url':share['url'].replace('https://www.dropbox.com/', 'https://dl.dropboxusercontent.com/'), 'filename':filename}", "def test_force_put_to_overwrite_existing(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n filename = 'demo-test.tar.gz'\n src1 = os.path.join(uploads, filename)\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id(filename)\n backend.put_variant(src1, id, filename)\n backend.put_variant(src2, id, filename, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/' + filename\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(Bucket=backend.bucket_name, Key=path)\n self.assertEquals(\n str(os.path.getsize(src2)),\n str(res['ResponseMetadata']['HTTPHeaders']['content-length'])\n )", "def hash_file(file_name):\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(hasher.hexdigest())", "def sha1(s: str) -> str:\n return hashlib.sha1(s.encode()).hexdigest()", 
"def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")", "def unpack(uri):\n conn = boto.connect_s3(anon=True, host='s3.amazonaws.com')\n bucket = conn.get_bucket('commoncrawl')\n key_ = Key(bucket, uri)\n file_ = warc.WARCFile(fileobj=GzipStreamFile(key_))\n return file_", "def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()", "def file_checksum(filename):\n hash_md5 = hashlib.md5()\n with tf.gfile.Open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n f.close()\n return hash_md5.hexdigest()", "def download_and_validate_checksum(name, checksum):\n dst = os.path.join(DOWNLOADS_DIR, os.path.basename(name))\n download_file(src=name, dst=dst)\n md5 = hashlib.md5()\n for chunk in chunked_reader(dst):\n md5.update(chunk)\n dl_checksum = md5.digest().hex()\n if dl_checksum != checksum:\n raise ValueError(f\"expected checksum {checksum} but received {dl_checksum}\")\n os.remove(dst)", "def sha1HashFile(self, filename: Path):\n bufferSize = 65536\n sha1Hash = hashlib.sha1()\n\n with filename.open('rb') as f:\n while True:\n data = f.read(bufferSize)\n\n if not data:\n break\n\n sha1Hash.update(data)\n\n return str(sha1Hash.hexdigest())", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def file_sha1(file_name, ignore_format=False, max_call_times=None):\r\n _FILE_SLIM = 65536 # read stuff in 64kb chunks!\r\n call_times = 0\r\n my_sha1 = hashlib.sha1()\r\n with open(file_name, \"rb\") as ob:\r\n while True:\r\n data = ob.read(_FILE_SLIM)\r\n if not data:\r\n break\r\n if ignore_format:\r\n data = data.decode(encoding=\"utf-8\")\r\n data = data.replace(\"\\r\", '')\r\n data = data.replace(\"\\n\", '')\r\n data = data.encode(encoding=\"utf-8\")\r\n if max_call_times:\r\n call_times += 1\r\n if call_times > max_call_times:\r\n break\r\n my_sha1.update(data)\r\n return my_sha1.hexdigest()", "def removeDuplicateUrl(inputfile, outputfile):\n\t\n\tlines_seen = set()\n\toutfile = open(outputfile, \"w\")\n\tfor line in open(inputfile, \"r\"):\n \t\tif line not in lines_seen:\n\t\t\toutfileput.write(line)\n\t\t\tlines_seen.add(line)\n\n\toutputfile.close()", 
"def sha1(self) -> str:\n return self.data.sha1", "def save_channel(xml):\n overwrite = False\n path = config.BASE_PATH + '/' + config.SUBMISSION_RSS_URL\n if not os.path.exists(path):\n overwrite = True\n else:\n size = os.path.getsize(path)\n if size != len(xml):\n overwrite = True\n else:\n from hashlib import md5\n of = open(path, 'rb')\n old_xml = of.read()\n of.close()\n old_md5 = md5(old_xml).digest()\n new_md5 = md5(xml).digest()\n if old_md5 != new_md5:\n overwrite = True\n if overwrite:\n print \"Writing latest RSS feed to %s\" % path\n f = open(path, 'wb')\n f.write(xml)\n f.close()", "def ensure_file(filename, old_contents=None, old_hash=None):\n hash_function = lambda text: hashlib.sha1(text.encode('utf-8')).digest()\n\n if old_hash is None and old_contents is not None:\n old_hash = hash_function(old_contents)\n\n if not os.path.exists(filename):\n # write the file if it doesn't exist\n if old_contents is not None:\n with open(filename, 'w') as f:\n f.write(old_contents)\n else:\n raise RuntimeError(\"No contents to write missing file \" +\n str(filename))\n\n with open(filename, mode='r') as f:\n contents = f.read()\n\n hashed = hash_function(contents)\n\n if old_hash and hashed != old_hash:\n raise RuntimeError(\"Existing file \" + str(filename) + \" does not\"\n + \" match stored file.\")\n\n return contents, hashed", "def checksum(item):\n return hashlib.sha256(obj_to_str(item).encode('utf-8')).hexdigest()", "def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. 
\\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False", "def check_one_file(filein, observations, hash, update, conf, errors):\n try:\n import boto.s3.key\n except ImportError:\n pass\n\n # set up nap here so we don't have to pass conf further down,\n # makes it eaiser to unit test\n nap = NapContext(conf.data['sleepiness'])\n\n # test what type got passed in to :filein:\n filename = \"\"\n # we don't know if boto will be installed, must be better way\n # to detect if `filein` a string (ahem, unicode thingy) or an\n # AWS key than the try/except here\n s3 = False\n # do I have a local filesystem path or s3 bucket key?\n if isinstance(filein, six.string_types):\n filename = os.path.abspath(filein)\n try:\n if type(filein) is boto.s3.key.Key:\n s3 = True\n filename = 's3://{0}/{1}'.format(filein.bucket.name,\n filein.name)\n except NameError:\n pass\n\n if conf.app.ignore_re and re.match(conf.app.ignore_re, filename):\n logging.debug('skipped {0}'.format(filename))\n return\n\n # normalize filename, take hash for key\n filename_key = hashlib.sha224(filename.encode('utf-8')).hexdigest()\n logging.info('{0}'.format(filename))\n logging.debug('sha224 of path {0}'.format(filename_key))\n\n # dispatch, these are ripe for refactoring to take\n # a file object\n if s3:\n seen_now = analyze_s3_key(filein, hash, nap)\n else:\n seen_now = analyze_file(filename, hash, nap)\n\n logging.debug('seen_now {0}'.format(seen_now))\n\n # make sure things match\n if filename_key in observations and not update:\n news = {}\n looks_the_same = compare_sightings(\n seen_now, observations[filename_key], news\n )\n if not looks_the_same:\n track_error(filename, \"%r has changed\" % filename, errors)\n elif any(news):\n update = observations[filename_key]\n update.update(news)\n observations[filename_key] = update\n observations.sync()\n logging.debug('new memory {0}'.format(news))\n # update observations\n else:\n observations[filename_key] = seen_now\n observations.sync()\n logging.info('update observations')", "def save_image(url):\n ext = url.split('.')[-1]\n filename = IMAGEDIR+os.sep+hashlib.md5(url.encode('utf-8')).hexdigest()+'.'+ext\n if os.path.exists(filename):\n return filename\n try:\n content = urlopen(url).read()\n f = open(filename,'wb') \n f.write(content)\n f.close()\n except:\n return None\n return filename", "def test_put_with_sequential_ids(self):\n filename = 'demo-test.tar.gz'\n base_id = utils.generate_id(filename).replace('-' + filename, '')\n id1 = base_id + '1-' + filename\n id2 = base_id + '2-' + filename\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n backend.put_variant(src, id1, 'demo-test.tar.gz')\n backend.put_variant(src, id2, 'demo-test.tar.gz')\n path1 = '/'.join(backend.id_to_path(id1)) + '/demo-test.tar.gz'\n path2 = '/'.join(backend.id_to_path(id2)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path1))\n self.assertTrue(backend.exists(path2))", "def check_duplicates(self, file_path):\n\t\tif not file_path:\n\t\t\treturn file_path\n\t\tif not 
self.settings.get('deduplicate_files', True):\n\t\t\t# Deduplication disabled.\n\t\t\treturn file_path\n\t\twas_new, existing_path = hashjar.add_hash(file_path) # Check if the file exists already.\n\t\tif not was_new:\n\t\t\tprint(\"\\tFile already exists! Resolving...\")\n\t\t\t# Quick and dirty comparison, assumes larger filesize means better quality.\n\t\t\tif os.path.isfile(file_path) and os.path.isfile(existing_path):\n\t\t\t\tif os.path.getsize(file_path) > os.path.getsize(existing_path):\n\t\t\t\t\tprint('\\t\\tNew file was better quality. Removing old file.')\n\t\t\t\t\tos.remove(existing_path)\n\t\t\t\t\tfor ele in self.loader.get_elements_for_file(existing_path):\n\t\t\t\t\t\tele.remap_file(existing_path, file_path)\n\t\t\t\t\treturn file_path\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\tOld file was better quality, removing newer file.\")\n\t\t\t\t\tos.remove(file_path)\n\t\t\t\t\treturn existing_path\n\t\treturn file_path", "def get_sha1_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m: hashlib._hashlib.HASH = hashlib.sha1()\n return calc_hash(src, m)", "def test_retrieve_original_to_temp(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n src = os.path.join(self.upload_path, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n\n # retrieve file\n result = backend.retrieve_original(id, self.tmp_path)\n expected_dst = os.path.join(self.tmp_path, id, 'demo-test.tar.gz')\n self.assertEquals(expected_dst, result)\n self.assertTrue(os.path.exists(expected_dst))", "def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)", "def upload_with_checksum(request, md5chunk, md5total, chunk, chunks):\n filename = clean_filename(request.args['name'])\n dst = os.path.join(UPLOAD_DIR,filename)\n\n buf_len = int(request.args['chunk_size'])\n buf = request.stream.read(buf_len)\n\n md5 = hashlib.md5()\n md5.update(buf)\n if md5.hexdigest() != md5chunk:\n raise BadRequest(\"Checksum error\")\n\n # f = get_or_create_file(chunk, dst)\n # f.write(buf)\n # f.close()\n\n meta_key = dst\n write_meta_information_to_memcache(meta_key, md5total, chunk, chunks)\n return filename", "def sync_to_s3(pathname, bucket):\n BUCKET_MANAGER.sync(pathname, bucket)\n print(BUCKET_MANAGER.get_bucket_url(BUCKET_MANAGER.s3.Bucket(bucket)))", "def upload(file_path, aws_path, access_key, secret_key) -> None:\n # bucket = \"dev-com-courtlistener-storage\"\n bucket = \"seals.free.law\"\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n )\n transfer = S3Transfer(client)\n if \".png\" in file_path:\n content_type = \"image/png\"\n else:\n content_type = \"image/svg+xml\"\n transfer.upload_file(\n file_path,\n bucket,\n aws_path,\n extra_args={\"ContentType\": content_type, \"ACL\": \"public-read\"},\n )\n print(f\"http://{bucket}.s3-us-west-2.amazonaws.com/{aws_path}\")", "def _HashFilename(filename):\n if isinstance(filename, unicode):\n filename = filename.encode(UTF8)\n else:\n filename = unicode(filename, UTF8).encode(UTF8)\n m = 
hashlib.sha1(filename)\n return 'TRACKER_' + m.hexdigest() + '.' + filename[-16:]", "def url_to_filename(url, etag=None):\n\turl_bytes = url.encode ('utf-8')\n\turl_hash = sha256 (url_bytes)\n\tfilename = url_hash.hexdigest ()\n\n\tif etag:\n\t\tetag_bytes = etag.encode ('utf-8')\n\t\tetag_hash = sha256 (etag_bytes)\n\t\tfilename += '.' + etag_hash.hexdigest ()\n\n\treturn filename", "def fallback_name(orig_name=None):\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def _sha1_hash_file(self, config_type):\n config = self.CLOUDWATCH_CONFIG_TYPE_TO_CONFIG_VARIABLE_REPLACE_FUNC. \\\n get(config_type)()\n value = json.dumps(config)\n sha1_res = self._sha1_hash_json(value)\n return sha1_res", "def checksum(self, md5_file, file_name):\n try:\n with open(md5_file, 'r') as f:\n md5_file_contents = f.read()\n md5_str = md5_file_contents.split(' ')[0]\n os.remove(md5_file)\n except Exception as e:\n logging.exception('Could not read MD5 file {}. \\\n \\nTry to download the file again'.format(file_name))\n return False\n if not self.check_md5(file_name, md5_str):\n logging.error('Failed in checksum. Download the file again.')\n return False\n return True", "def hash_file ( filename ):\n sha1 = hashlib.sha1()\n with open( filename, 'rb' ) as f:\n while True:\n buf = f.read(65536) # read by 64kb buffers size\n if not buf:\n break\n sha1.update(buf)\n return sha1", "def get_previously_valid_urls():\n\n previously_valid_urls = get_json(BUCKET, PREVIOUSLY_VALID_URLS_S3_KEY)\n\n # If previously_valid_urls does not currently exist, create a new one and save to S3\n if not previously_valid_urls:\n previously_valid_urls = []\n save_previously_valid_urls(previously_valid_urls)\n\n return set(previously_valid_urls)", "def get_checksum(input_fname):\n with open(input_fname, \"rb\") as infile:\n file_contents = infile.read()\n\n checksum = hashlib.md5(file_contents).hexdigest()\n return checksum", "def aws_signature(bucket,keypath,expires,secret_access_key=''):\n sign_msg = ('GET\\n\\n\\n'+expires+'\\n' +'/'+bucket+'/'+keypath)\n h = hmac.new(secret_access_key, sign_msg, hashlib.sha1)\n signature = urllib.quote(base64.b64encode(h.digest()))\n return (signature,sign_msg)", "def check_sha1(filename, sha1_hash):\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n\n sha1_file = sha1.hexdigest()\n l = min(len(sha1_file), len(sha1_hash))\n return sha1.hexdigest()[0:l] == sha1_hash[0:l]" ]
[ "0.6614705", "0.62321055", "0.5942641", "0.5849977", "0.5737562", "0.5682555", "0.567223", "0.5630804", "0.5596855", "0.5581484", "0.5557663", "0.547344", "0.5443973", "0.5370834", "0.5353757", "0.531475", "0.52927846", "0.52644974", "0.5246912", "0.52463406", "0.5242865", "0.5238636", "0.52239746", "0.52235824", "0.521811", "0.5209496", "0.51931465", "0.5185765", "0.5172168", "0.5168687", "0.516811", "0.5163386", "0.5160002", "0.5149851", "0.5141457", "0.51396364", "0.5136092", "0.5135612", "0.5127691", "0.5123277", "0.5120523", "0.5119943", "0.51012045", "0.50918406", "0.50864935", "0.50864935", "0.50700796", "0.5050496", "0.50449854", "0.5043318", "0.5038709", "0.5035623", "0.5029932", "0.50249183", "0.50248605", "0.50181997", "0.5016651", "0.5008807", "0.5008807", "0.5002052", "0.49997884", "0.4993291", "0.4992923", "0.49923155", "0.49830168", "0.49826756", "0.49825722", "0.4975556", "0.4951944", "0.49512318", "0.49504942", "0.49334225", "0.4929193", "0.49283642", "0.49227425", "0.492054", "0.49176553", "0.4917641", "0.49173984", "0.4914816", "0.49118996", "0.49069333", "0.49057826", "0.49007693", "0.48995", "0.489894", "0.48962483", "0.48928794", "0.48925894", "0.4878146", "0.48755398", "0.4875513", "0.4875278", "0.48606917", "0.48546964", "0.48545286", "0.48530683", "0.48456693", "0.48443332", "0.48434153" ]
0.6733674
0
Create the translations model for the shared model 'model'. 'related_name' is the related name for the reverse FK from the translations model. 'meta' is an (optional) dictionary of attributes for the translations model's inner Meta class. 'fields' is a dictionary of fields to put on the translations model.
def create_translations_model(model, related_name, meta, **fields):
    if not meta:
        meta = {}
    unique = [('language_code', 'master')]
    meta['unique_together'] = list(meta.get('unique_together', [])) + unique
    # Create inner Meta class
    Meta = type('Meta', (object,), meta)
    name = '%sTranslation' % model.__name__
    attrs = {}
    attrs.update(fields)
    attrs['Meta'] = Meta
    attrs['__module__'] = model.__module__
    attrs['language_code'] = models.CharField(max_length=15, db_index=True)
    # null=True is so we can prevent cascade deletion
    attrs['master'] = models.ForeignKey(model, related_name=related_name, editable=False, null=True)
    # Create and return the new model
    return ModelBase(name, (BaseTranslationModel,), attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_translations_model(shared_model, related_name, meta, **fields):\n if not meta:\n meta = {}\n\n if shared_model._meta.abstract:\n # This can't be done, because `master = ForeignKey(shared_model)` would fail.\n raise TypeError(\"Can't create TranslatedFieldsModel for abstract class {0}\".format(shared_model.__name__))\n\n # Define inner Meta class\n meta['app_label'] = shared_model._meta.app_label\n meta['db_tablespace'] = shared_model._meta.db_tablespace\n meta['managed'] = shared_model._meta.managed\n meta['unique_together'] = list(meta.get('unique_together', []))\n meta.setdefault('db_table', '{0}_translation'.format(shared_model._meta.db_table))\n meta.setdefault('verbose_name', _lazy_verbose_name(shared_model))\n\n # Avoid creating permissions for the translated model, these are not used at all.\n # This also avoids creating lengthy permission names above 50 chars.\n if django.VERSION >= (1,7):\n meta.setdefault('default_permissions', ())\n\n # Define attributes for translation table\n name = str('{0}Translation'.format(shared_model.__name__)) # makes it bytes, for type()\n\n attrs = {}\n attrs.update(fields)\n attrs['Meta'] = type(str('Meta'), (object,), meta)\n attrs['__module__'] = shared_model.__module__\n attrs['objects'] = models.Manager()\n attrs['master'] = models.ForeignKey(shared_model, related_name=related_name, editable=False, null=True)\n\n # Create and return the new model\n translations_model = TranslatedFieldsModelBase(name, (TranslatedFieldsModel,), attrs)\n\n # Register it as a global in the shared model's module.\n # This is needed so that Translation model instances, and objects which refer to them, can be properly pickled and unpickled.\n # The Django session and caching frameworks, in particular, depend on this behaviour.\n mod = sys.modules[shared_model.__module__]\n setattr(mod, name, translations_model)\n\n return translations_model", "def contribute_translations(cls, rel):\n opts = cls._meta\n opts.translations_accessor = rel.get_accessor_name()\n opts.translations_model = rel.model\n opts.translations_cache = '%s_cache' % rel.get_accessor_name()\n trans_opts = opts.translations_model._meta\n \n # Set descriptors\n for field in trans_opts.fields:\n if field.name == 'pk':\n continue\n if field.name == 'master':\n continue\n if field.name == opts.translations_model._meta.pk.name:\n continue\n if field.name == 'language_code':\n attr = LanguageCodeAttribute(opts)\n else:\n attr = TranslatedAttribute(opts, field.name)\n setattr(cls, field.name, attr)", "def create_intermediary_table_model(model):\n name = model.__name__ + 'Relation'\n \n class Meta:\n db_table = '%s_relation' % model._meta.db_table\n unique_together = (('tag', 'content_type', 'object_id'),)\n\n def obj_unicode(self):\n return u'%s [%s]' % (self.content_type.get_object_for_this_type(pk=self.object_id), self.tag)\n \n # Set up a dictionary to simulate declarations within a class \n attrs = {\n '__module__': model.__module__,\n 'Meta': Meta,\n 'tag': models.ForeignKey(model, verbose_name=_('tag'), related_name='items'),\n 'content_type': models.ForeignKey(ContentType, verbose_name=_('content type')),\n 'object_id': models.PositiveIntegerField(_('object id'), db_index=True),\n 'content_object': generic.GenericForeignKey('content_type', 'object_id'),\n '__unicode__': obj_unicode,\n }\n\n return type(name, (models.Model,), attrs)", "def _get_studio_action_translations(self, model, **kwargs):\n domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]\n\n # search view 
+ its inheritancies\n views = request.env['ir.ui.view'].search([('model', '=', model.model)])\n domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain\n\n def make_domain(fld, rec):\n name = \"%s,%s\" % (fld.model_name, fld.name)\n return ['&', ('res_id', '=', rec.id), ('name', '=', name)]\n\n def insert_missing(fld, rec):\n if not fld.translate:\n return []\n\n if fld.related:\n try:\n # traverse related fields up to their data source\n while fld.related:\n rec, fld = fld.traverse_related(rec)\n if rec:\n return ['|'] + domain + make_domain(fld, rec)\n except AccessError:\n return []\n\n assert fld.translate and rec._name == fld.model_name\n request.env['ir.translation'].insert_missing(fld, rec)\n return []\n\n # insert missing translations of views\n for view in views:\n for name, fld in view._fields.items():\n domain += insert_missing(fld, view)\n\n # insert missing translations of model, and extend domain for related fields\n record = request.env[model.model].search([], limit=1)\n if record:\n for name, fld in record._fields.items():\n domain += insert_missing(fld, record)\n\n action = {\n 'name': _('Translate view'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'ir.translation',\n 'view_mode': 'tree',\n 'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],\n 'target': 'current',\n 'domain': domain,\n }\n\n return action", "def createObject(self, *args):\n return _libsbml.QualModelPlugin_createObject(self, *args)", "def createObject(self, *args):\n return _libsbml.CompModelPlugin_createObject(self, *args)", "def related(self, name=None, reverse_lookup=False):\n\t\tif not name and reverse:\n\t\t\tfrom .models import Related\n\t\t\tmodel = self\n\t\t\tif hasattr(self, 'parent_model'):\n\t\t\t\tmodel = self.parent_model\n\t\t\tct = ContentType.objects.get_for_model(model)\n\t\t\tret = Related.objects.filter(related_content_type=ct.pk, related_object_id=self.pk).order_by('content_type__model', 'object_id')\n\t\t\treturn ret\n\n\t\tif not name:\n\t\t\traise Exception('Need a related item name to lookup!')\n\n\t\t# Convert to list if needed\n\t\tif isinstance(name, str):\n\t\t\tname = [name]\n\n\t\t# Grab this model's content type\n\t\tcontent_type = ContentType.objects.get_for_model(type(self))\n\n\t\t# Grab model paths via aliases and combine with dot-notation model names\n\t\tmodel_paths = [v[1] for v in self.related_overrides.get(self.related_override_key(), self.related_models) if v[0] in name] + [v for v in name if '.' 
in v]\n\t\t# Grab related content types\n\t\trelated_content_types = [ContentType.objects.get_for_model(apps.get_model(*model_path.split('.'))) for model_path in model_paths]\n\n\t\t# Set to/from fields\n\t\tfields = ['object_id', 'content_type', 'content_object', 'content_type_id']\n\t\t_from = dict(zip(fields, fields))\n\t\t_to = {k: 'related_{}'.format(v) for (k, v) in _from.items()}\n\n\t\t# Switch to/from if reversed\n\t\tif reverse_lookup:\n\t\t\t_from, _to = _to, _from\n\n\t\targs = {\n\t\t\t_from['content_type']: content_type,\n\t\t\t_from['object_id']: self.pk,\n\t\t\t'{}__in'.format(_to['content_type']): related_content_types,\n\t\t}\n\n\t\tif not reverse_lookup:\n\t\t\targs['group__in'] = name\n\n\t\t# Get relations\n\t\tfrom .models import Related\n\t\trelations = Related.objects.filter(**args)\n\n\t\t# For reverse lookup, if there's only one related content type, query those models directly\n\t\tif reverse_lookup and len(related_content_types) == 1:\n\t\t\treturn related_content_types[0].model_class().objects.filter(pk__in=relations.values('object_id')).public()\n\t\t# Otherwise, prefetch in bulk and cache each content type separately\n\t\telse:\n\t\t\tself.prefetch_relations(relations, _to)\n\t\t\treturn [getattr(relation, '_content_object_cache') for relation in relations if hasattr(relation, '_content_object_cache')]", "def save_translations(cls, instance, **kwargs):\n opts = cls._meta\n if hasattr(instance, opts.translations_cache):\n trans = getattr(instance, opts.translations_cache)\n if not trans.master_id:\n trans.master = instance\n trans.save()", "def create_models( self ):", "def from_django_model(dj_model, no_follow_fields=None, no_follow_models={}, **kwargs):\n \"\"\" Create a protocol buffer message from a django type.\n By default all args will be included in the new message.\n If follow_related is set messages will also be created for foreign key\n types. A list of generated messages will be returned.\n Accepted Args:\n msg_name - (str) The name of the message\n dj_model - (Type) A reference to the django\n model type\n include - (List) A list of field names from the model to\n be included in the mapping\n exclude - (List) A list of field names to exclude from the\n model. Only used if included arg is []\n follow_related - (bool)Follow into relation fields if true or \n using pk if false\n no_follow_fields - (List) Field names which should not be followed.\n Only used if follow_related=True\n no_follow_models - (List) Django models not to follow as relations\n \"\"\"\n # Check for existing pbandj dj model. 
If not found create one\n model = no_follow_models.get(dj_model, None)\n if model:\n return model\n else:\n model = no_follow_models.setdefault(dj_model, Model(dj_model._meta.object_name))\n \n # Local fields\n django_class_fields = [field for field in \n dj_model._meta.local_fields +\n dj_model._meta.many_to_many]\n django_field_by_name = {}\n \n # Iterate through Django fields and init dict that allows\n # lookup of fields by name.\n for field in django_class_fields:\n django_field_by_name[field.name] = field\n \n field_set = set()\n \n # Remove excluded fields or add included fields\n include = kwargs.get('include', [])\n exclude = kwargs.get('exclude', [])\n if include != []: \n # Assert that the fields all exist\n include_set = set(include)\n assert include_set == set(django_field_by_name.keys()) & include_set\n field_set = include_set\n else:\n useable_field_names = set(django_field_by_name.keys()) - set(exclude)\n field_set = useable_field_names\n \n # Add a field for each remaining django field in the set\n for field_name in field_set:\n field = django_field_by_name[field_name]\n model.fields.append(create_field(field, no_follow_models=no_follow_models, **kwargs))\n \n # Add a reverse field for reverse relationships\n related_field_list = dj_model._meta.get_all_related_objects() + dj_model._meta.get_all_related_many_to_many_objects()\n for related_field in related_field_list:\n model.related_fields.append(create_field(related_field.field, no_follow_models=no_follow_models, **kwargs))\n \n return model", "def create_model(self):\n pass", "def create_model(self):\n pass", "def create_model(name, fields=''):\n if '/' in name:\n blueprint_name, model_name = name.split('/')\n output_file = 'blueprints/%s/models.py' % blueprint_name\n else:\n model_name = name\n output_file = 'models.py'\n model = create_model.model_scaffold % dict(model_name=model_name.capitalize())\n\n field_declares = []\n field_inits = []\n init_args = []\n for f in fields.split():\n splitted = f.split(':')\n if len(splitted) > 1:\n field_name, field_type = splitted[0], 'db.%s' % splitted[1]\n else:\n field_name, field_type = splitted[0], 'db.Text'\n field_declares.append(create_model.field_declare % dict(field_name=field_name, field_type=field_type))\n field_inits.append(create_model.field_init % dict(field_name=field_name))\n init_args.append(field_name)\n\n field_declares = '\\n'.join(field_declares)\n\n init_args = (', %s' % ', '.join(init_args)) if init_args else ''\n init_body = '\\n'.join(field_inits) if field_inits else '%spass' % (' ' * 8)\n init_method = ' def __init__(self%s):\\n%s' % (init_args, init_body)\n\n file_exists = os.path.exists(output_file)\n with open(output_file, 'a') as out_file:\n model = '%(base)s%(field_declares)s\\n\\n%(init_method)s' % dict(base=model,\n field_declares=field_declares,\n init_method=init_method)\n if not file_exists:\n model = '%(imports)s\\n%(rest)s' % dict(imports=create_model.imports, rest=model)\n out_file.write(model)\n create_model_form(name, fields)", "def createObject(self, *args):\n return _libsbml.MultiModelPlugin_createObject(self, *args)", "def create(self,**extra_fields):\r\n print(extra_fields)\r\n data = self.model(**extra_fields)\r\n data.save(using=self._db)", "def insert_translations(self):\n if self.verbosity > 1:\n self.stdout.write(\n f' Deleting old translations...'\n )\n\n ElectionNameTranslator.objects.all().delete()[0]\n\n sql_str = \"\"\"\n INSERT INTO voters_electionnametranslator (date, raw_name, clean_name)\n SELECT \n a.election_lbl as 
date,\n a.election_desc as raw_name,\n trim(\n substring(\n a.election_desc from '^(?:(?:\\d{1,2}\\/|-){0,2}\\d{2,5})?\\s?(.+)$'\n )\n ) as clean_name\n FROM (\n SELECT DISTINCT election_lbl, election_desc\n FROM voter_ncvhis\n ) as a;\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(sql_str)\n if self.verbosity > 1:\n self.stdout.write(\n f' Inserted {cursor.rowcount} translations...'\n )\n\n return", "def create_model_form(name, fields=''):\n if '/' in name:\n blueprint_name, model_name = name.split('/')\n output_file = 'blueprints/%s/forms.py' % blueprint_name\n else:\n model_name = name\n output_file = 'forms.py'\n file_exists = os.path.exists(output_file)\n field_args = []\n for f in fields.split():\n field_name = f.split(':')[0]\n field_args.append(create_model_form.field_args % dict(field_name=field_name))\n form = create_model_form.form_scaffold % dict(model_name=model_name.capitalize(), field_args=''.join(field_args))\n with open(output_file, 'a') as out_file:\n if not file_exists:\n form = '''%(imports)s\\n%(rest)s''' % dict(imports=create_model_form.imports,\n rest=form)\n out_file.write(form)", "def from_model(cls, model):\n meta = model._meta\n model_sig = cls(db_tablespace=meta.db_tablespace,\n index_together=meta.index_together,\n model_name=meta.object_name,\n pk_column=six.text_type(meta.pk.column),\n table_name=meta.db_table,\n unique_together=meta.unique_together,\n unique_together_applied=True)\n\n if getattr(meta, 'constraints', None):\n # Django >= 2.2\n for constraint in meta.original_attrs['constraints']:\n model_sig.add_constraint(constraint)\n\n if getattr(meta, 'indexes', None):\n # Django >= 1.11\n for index in meta.original_attrs['indexes']:\n model_sig.add_index(index)\n\n for field in meta.local_fields + meta.local_many_to_many:\n # Don't generate a signature for generic relations.\n if not isinstance(field, GenericRelation):\n model_sig.add_field(field)\n\n return model_sig", "def models_from_mod_setup(data, mod_opts=None):\n opts = []\n if isinstance(data, dict):\n if not data.get('model', None):\n return mod_opts\n cur_vars = data.get('variations', [])\n if data.get('var_name', None):\n cur_vars = [{data['var_name']: v} for v in cur_vars]\n consts = data.get('consts', {})\n cur_opts = [consts] if not cur_vars else [dict(**v, **consts) for v in cur_vars]\n related_name = data.get('related_name', None)\n if not related_name:\n return cur_opts\n elif data and isinstance(data, list):\n cur_opts = mod_opts or [] # Current list of main model options, Eventually it will have all possible combos\n for ea in data:\n cur_opts = models_from_mod_setup(ea, mod_opts=cur_opts)\n data = data[0]\n else:\n raise ImproperlyConfigured(\"Expected a properly configured dict, or a list of them for models_from_mod_setup. 
\")\n st_field, st = data.get('unique_str', (None, ''))\n for i, r in enumerate(cur_opts):\n label = {st_field: st.format(i)} if st_field else {}\n obj = data['model'].objects.create(**r, **label)\n if related_name:\n opts += [dict(**{related_name: obj}, **m) for m in mod_opts]\n else:\n opts.append(obj)\n return opts", "def get_meta(table_name, constraints, column_to_field_name):\n # unique_together = []\n # for index, params in constraints.items():\n # if params['unique']:\n # columns = params['columns']\n # if len(columns) > 1:\n # we do not want to include the u\"\" or u'' prefix\n # so we build the string rather than interpolate the tuple\n # tup = '(' + ', '.join(\"'%s'\" % column_to_field_name[c] for c in columns) + ')'\n # unique_together.append(tup)\n\n return type('Meta', (),\n dict(managed=False, db_table=table_name, app_label='layers'))\n # if unique_together:\n # tup = '(' + ', '.join(unique_together) + ',)'\n # meta += [\" unique_together = %s\" % tup]\n # return meta", "def _create(self, model_obj: Any):\n conn = self.provider.get_connection()\n\n try:\n model_obj.save(\n refresh=True,\n index=self.model_cls._index._name,\n using=conn,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def save(self, *args, **kwargs):\n\t\tconflicting_instance = Translation.objects.filter(\n\t\t\tterm=self.term,\n\t\t\tlanguage=self.language,\n\t\t\tvenue__isnull=True\n\t\t)\n\n\t\tif self.pk:\n\t\t\tconflicting_instance = conflicting_instance.exclude(pk=self.pk)\n\n\t\tif conflicting_instance.exists():\n\t\t\traise ValidationError({\n\t\t\t\t'error':\n\t\t\t\t\t'Generic translation for this term (%s) and language (%s) already exists.' % (\n\t\t\t\t\t\tself.term, self.language\n\t\t\t\t\t)\n\t\t\t})\n\n\t\tsuper(Translation, self).save(*args, **kwargs)", "def from_model(cls, model):\n meta = model._meta\n model_sig = cls(db_tablespace=meta.db_tablespace,\n index_together=deepcopy(meta.index_together),\n model_name=meta.object_name,\n pk_column=six.text_type(meta.pk.column),\n table_name=meta.db_table,\n unique_together=deepcopy(meta.unique_together))\n model_sig._unique_together_applied = True\n\n if getattr(meta, 'indexes', None):\n for index in meta.original_attrs['indexes']:\n model_sig.add_index(index)\n\n for field in meta.local_fields + meta.local_many_to_many:\n # Don't generate a signature for generic relations.\n if not isinstance(field, GenericRelation):\n model_sig.add_field(field)\n\n return model_sig", "def call( # type: ignore[override]\n self,\n instance: Model,\n step: builder.BuildStep,\n context: declarations.PostGenerationContext\n ) -> None:\n related_manager = getattr(instance, self.descriptor_name)\n # Get the right field names from the intermediary m2m table.\n source_field = related_manager.through._meta.get_field(\n related_manager.source_field_name\n )\n if isinstance(instance, source_field.related_model):\n # The source_field points to the instance's model.\n source = related_manager.source_field_name\n target = related_manager.target_field_name\n else:\n source = related_manager.target_field_name\n target = related_manager.source_field_name\n\n # Add the relation.\n for related_object in super().call(instance, step, context):\n related_manager.through.objects.create(\n **{source: instance, target: related_object}\n )", "def _build_model(self, **kwargs):\n pass", "def createSubmodel(self):\n return _libsbml.CompModelPlugin_createSubmodel(self)", "def save_object(self,\n obj_dict,\n custom_values={},\n 
recursive_custom_values={},\n **kwargs):\n app_name = obj_dict['app_name']\n model_name = obj_dict['model_name']\n model_obj = self.app_model(app_name, model_name)\n\n # kwargs\n related_fields = kwargs.get('related_fields', {}).copy()\n parent_obj = kwargs.get('parent_obj', None)\n\n # se uno degli attributi ha oggetti innestati e type == m2m rimuovere attr, salvare e usare .add() sull'obj salvato per aggiungere gli m2m\n # move m2m definition to a private collection and then purge them from original object\n m2ms = { i:[e for e in obj_dict['object'][i]] for i in obj_dict['m2m']}\n\n # relation to the father\n if parent_obj:\n obj_dict['object'][obj_dict['related_field']] = parent_obj\n\n # save obj without optional m2m\n print('saving:', obj_dict['object'])\n # detect and fetch fk\n save_dict=self.get_save_dict(model_obj, obj_dict)\n\n print(save_dict)\n\n save_dict.update(custom_values)\n\n # save recursive custom values\n for k,v in recursive_custom_values.items():\n if k in obj_dict['fields']:\n save_dict[k] = v\n\n # if duplication create new object\n if obj_dict.get('duplicate'):\n obj = model_obj.objects.create(**save_dict)\n # else update existent object with dict data\n else:\n obj = model_obj.objects.get(pk=obj_dict.get('source_pk'))\n obj.__dict__.update(**save_dict)\n obj.save()\n\n # save parent related fields (if object hasn't them)\n for k,v in related_fields.items():\n if k == obj_dict.get('related_field'): continue\n # if k not in obj_dict['object']: continue\n setattr(obj, k, v)\n obj.save()\n\n print('saved: {} {} ({})'.format(app_name, model_name, obj.__dict__))\n print()\n\n # save each m2m\n self.save_m2m(obj, m2ms)\n\n # set related fields and pass them to childs\n if obj_dict.get('related_field'):\n related_fields[obj_dict['related_field']] = getattr(obj, obj_dict['related_field'])\n\n # recursive call\n for child in obj_dict['childrens']:\n # only if save==True, object is saved.\n # else it is ignored, is a duplicated of same item!\n if child['save']:\n self.save_object(child,\n recursive_custom_values=recursive_custom_values,\n parent_obj=obj,\n related_fields=related_fields)\n return obj", "def build_model(cls, args, task):\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 1024\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 1024\n\n src_langs = [lang_pair.split(\"-\")[0] for lang_pair in task.lang_pairs]\n tgt_langs = [lang_pair.split(\"-\")[1] for lang_pair in task.lang_pairs]\n\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n\n # encoders/decoders for each language\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang, shared_encoder_embed_tokens=None):\n if lang not in lang_encoders:\n src_dict = task.dicts[lang]\n if shared_encoder_embed_tokens is None:\n encoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(src_dict),\n embedding_dim=args.encoder_embed_dim,\n padding_idx=src_dict.pad(),\n freeze_embed=args.encoder_freeze_embed,\n normalize_embed=getattr(args, \"encoder_normalize_embed\", False),\n )\n utils.load_embedding(\n embedding=encoder_embed_tokens,\n dictionary=src_dict,\n pretrained_embed=args.encoder_pretrained_embed,\n )\n else:\n encoder_embed_tokens = shared_encoder_embed_tokens\n lang_encoders[lang] = cls.single_model_cls.build_encoder(\n args, src_dict, embed_tokens=encoder_embed_tokens\n )\n return lang_encoders[lang]\n\n def get_decoder(lang, 
shared_decoder_embed_tokens=None):\n \"\"\"\n Fetch decoder for the input `lang`, which denotes the target\n language of the model\n \"\"\"\n if lang not in lang_decoders:\n tgt_dict = task.dicts[lang]\n if shared_decoder_embed_tokens is None:\n decoder_embed_tokens = common_layers.Embedding(\n num_embeddings=len(tgt_dict),\n embedding_dim=args.decoder_embed_dim,\n padding_idx=tgt_dict.pad(),\n freeze_embed=args.decoder_freeze_embed,\n )\n utils.load_embedding(\n embedding=decoder_embed_tokens,\n dictionary=tgt_dict,\n pretrained_embed=args.decoder_pretrained_embed,\n )\n else:\n decoder_embed_tokens = shared_decoder_embed_tokens\n lang_decoders[lang] = cls.single_model_cls.build_decoder(\n args, task.dicts[lang], tgt_dict, embed_tokens=decoder_embed_tokens\n )\n return lang_decoders[lang]\n\n # shared encoders/decoders (if applicable)\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=src_langs,\n embed_dim=args.encoder_embed_dim,\n build_embedding=common_layers.build_embedding,\n pretrained_embed_path=None,\n )\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=tgt_langs,\n embed_dim=args.decoder_embed_dim,\n build_embedding=common_layers.build_embedding,\n pretrained_embed_path=None,\n )\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs, tgt_langs):\n encoders[lang_pair] = (\n shared_encoder\n if shared_encoder is not None\n else get_encoder(\n src_lang, shared_encoder_embed_tokens=shared_encoder_embed_tokens\n )\n )\n decoders[lang_pair] = (\n shared_decoder\n if shared_decoder is not None\n else get_decoder(\n tgt_lang, shared_decoder_embed_tokens=shared_decoder_embed_tokens\n )\n )\n\n return cls(task, encoders, decoders)", "def add_translations(self, translations):\n for translation in translations:\n self.add_field_translation(translation)", "def update_related_model_fields(self,\n field_name: str,\n related_metafield: Field,\n obj: Model,\n related_fields: Fields,\n legal_reason: Optional[LegalReason] = None,\n purpose: Optional[\"AbstractPurpose\"] = None,\n anonymization: bool = True):\n related_attribute = getattr(obj, field_name, None)\n if related_metafield.one_to_many or related_metafield.many_to_many:\n for related_obj in related_attribute.all():\n related_fields.anonymizer.update_obj(\n related_obj, legal_reason, purpose, related_fields,\n base_encryption_key=self._get_encryption_key(obj, field_name),\n anonymization=anonymization\n )\n elif related_metafield.many_to_one or related_metafield.one_to_one:\n if related_attribute is not None:\n related_fields.anonymizer.update_obj(\n related_attribute, legal_reason, purpose, related_fields,\n base_encryption_key=self._get_encryption_key(obj, field_name),\n anonymization=anonymization\n )\n else:\n warnings.warn(f'Model anonymization discovered unreachable field {field_name} on model'\n f'{obj.__class__.__name__} on obj {obj} with pk {obj.pk}')", "def createObject(self, *args):\n return _libsbml.FbcModelPlugin_createObject(self, *args)", "def get_model(self) -> BaseLanguageModel:\n model = 
available_models[self.model_name.value]\n kwargs = model._lc_kwargs\n secrets = {secret: getattr(model, secret) for secret in model.lc_secrets.keys()}\n kwargs.update(secrets)\n\n model_kwargs = kwargs.get(\"model_kwargs\", {})\n for attr, value in self.dict().items():\n if attr == \"model_name\":\n # Skip model_name\n continue\n if hasattr(model, attr):\n # If the model has the attribute, add it to kwargs\n kwargs[attr] = value\n else:\n # Otherwise, add it to model_kwargs (necessary for chat models)\n model_kwargs[attr] = value\n kwargs[\"model_kwargs\"] = model_kwargs\n\n # Initialize a copy of the model using the config\n model = model.__class__(**kwargs)\n return model", "def update_model_fields(self, model, payload):\n fk_fields = model.fk_field_names()\n models_fields = {\n i: payload[i]\n for i, mapping in model.fields.items()\n if i not in fk_fields\n }\n for i, mapping in model.fields.items():\n if i in fk_fields:\n try:\n models_fields[i] = self.render_relation_field(\n mapping, payload[i]\n )\n except SkipField:\n models_fields.pop(i, None)\n model.update(models_fields)\n model.remote_instance = self.create_remote_instance(payload)\n return model", "def prepare_model(self, **kwargs):\n pass", "def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the application settings or did 
not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()", "def __init__(\n self,\n parent_model: 'Any',\n model: 'Any',\n info: 'ResolveInfo',\n graphql_args: dict,\n ):\n super().__init__()\n self.info: 'ResolveInfo' = info\n self.graphql_args: dict = graphql_args\n\n self.model: 'Any' = model\n self.parent_model: 'Any' = parent_model\n self.parent_model_pks: 'Tuple[str, ...]' = self._get_model_pks(\n self.parent_model\n )\n self.parent_model_pk_fields: tuple = tuple(\n getattr(self.parent_model, pk) for pk in self.parent_model_pks\n )\n\n self.model_relation_field: str = to_snake_case(self.info.field_name)\n\n self.relation: 'Any' = getattr(\n self.parent_model, self.model_relation_field\n )", "def set_name_translation(self):\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc(\"Translation\", current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"Translation\",\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or \"en\",\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value(\"Translation\", current.name, \"translated_text\", self.label)\n\t\t\tfrappe.translate.clear_cache()", "def add_m2m_factories(self) -> None:\n opts = self.model._meta\n for rel in get_model_relations(self.model):\n if not rel.many_to_many:\n continue\n if self.model == rel.field.model:\n # The ManyToManyField is declared on model.\n related_model = rel.field.related_model\n descriptor_name = rel.field.name\n declaration_name = rel.field.name\n elif self.model == rel.field.related_model:\n # The ManyToManyField is declared on the related_model;\n # working on a 'reverse' m2m relation\n related_model = rel.field.model\n descriptor_name = rel.get_accessor_name()\n declaration_name = rel.name\n else:\n # Rel is an inherited relation as neither end of the relation\n # points to self.model.\n # One relation points to the inherited parent model, the other\n # to the actual related model. 
If rel.field.model is the parent,\n # the related_model is rel.field.related_model and vice versa.\n if rel.field.model in opts.parents:\n # self.model inherited the actual ManyToManyField.\n # Use the inherited ManyToManyField's name for descriptor\n # and declaration.\n related_model = rel.field.related_model\n descriptor_name = rel.field.name\n declaration_name = rel.field.name\n elif rel.field.related_model in opts.parents:\n # self.model inherited the reverse ManyToManyRelation\n related_model = rel.field.model\n descriptor_name = rel.get_accessor_name()\n declaration_name = rel.name\n else:\n raise TypeError(\n \"Unknown relation: {!s}\".format(rel.get_path_info())\n )\n factory_name = self._get_factory_name_for_model(related_model)\n if not hasattr(self.factory, declaration_name):\n m2m_factory = M2MFactory(\n factory=factory_name,\n descriptor_name=descriptor_name,\n related_model=related_model\n )\n setattr(self.factory, declaration_name, m2m_factory)", "def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass", "def gen_submodels(self, model, options):\n for submodel in options:\n model.submodels.create(id=submodel)", "def create(self, auth_token, name, title=None, optional_parameters=None):\n\n if optional_parameters is None:\n optional_parameters = {}\n\n e = self.my_django_model(name=name, title=title)\n e.save()\n if optional_parameters:\n facade.subsystems.Setter(auth_token, self, e, optional_parameters)\n e.save()\n self.authorizer.check_create_permissions(auth_token, e)\n return e", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def _create(cls, model_class, *args, **kwargs):\n for k in kwargs.keys():\n if k in model_class.relationships():\n rel_key = '{}_id'.format(k)\n kwargs[rel_key] = str(kwargs[k].id)\n obj = super(BaseFactory, cls)._create(model_class, *args, **kwargs)\n obj.save(obj)\n return obj", "def populate_language():\n taxonomy = LanguageTaxonomy()\n taxonomy.families = set(buildconfig.LANGUAGE_FAMILIES)\n\n max_length = Language._meta.get_field('name').max_length\n\n language_objects = []\n for language in taxonomy.languages():\n name = language.name[:max_length]\n language_objects.append(Language(id=language.id, name=name, family=None))\n Language.objects.bulk_create(language_objects)\n\n for language in taxonomy.languages():\n family = taxonomy.family_of(language.name)\n if family is not None:\n src = Language.objects.get(id=language.id)\n target = Language.objects.get(id=family.id)\n src.family = target\n src.save()", "def create_obj(django_model, new_obj_key=None, *args, **kwargs):\n\n try:\n if isinstance(django_model, six.string_types):\n django_model = apps.get_model(django_model)\n assert is_valid_django_model(django_model), (\n \"You need to pass a valid Django Model or a string with format: \"\n '<app_label>.<model_name> to \"create_obj\"'\n ' function, received \"{}\".'\n ).format(django_model)\n\n data = kwargs.get(new_obj_key, None) if new_obj_key else kwargs\n new_obj = django_model(**data)\n new_obj.full_clean()\n new_obj.save()\n return new_obj\n except LookupError:\n pass\n except ValidationError as e:\n raise ValidationError(e.__str__())\n except TypeError as e:\n raise TypeError(e.__str__())\n except Exception as e:\n return e.__str__()", "def __init__(self, model, parent=None, relation=None, reverse=None,\n related_name=None, accessor_name=None, nullable=False,\n depth=0):\n\n self.model = model\n\n self.app_name = model._meta.app_label\n 
self.model_name = model._meta.object_name\n self.db_table = model._meta.db_table\n self.pk_column = model._meta.pk.column\n\n self.parent = parent\n self.parent_model = parent and parent.model or None\n\n self.relation = relation\n self.reverse = reverse\n\n self.related_name = related_name\n self.accessor_name = accessor_name\n self.nullable = nullable\n self.depth = depth\n\n self.children = []", "def create(context, namespace_name, values, session):\n\n namespace = namespace_api.get(\n context, namespace_name, session)\n\n # if the resource_type does not exist, create it\n resource_type_name = values['name']\n metadef_utils.drop_protected_attrs(\n models.MetadefNamespaceResourceType, values)\n try:\n resource_type = resource_type_api.get(\n context, resource_type_name, session)\n except exc.NotFound:\n resource_type = None\n LOG.debug(\"Creating resource-type %s\", resource_type_name)\n\n if resource_type is None:\n resource_type_dict = {'name': resource_type_name, 'protected': False}\n resource_type = resource_type_api.create(\n context, resource_type_dict, session)\n\n # Create the association record, set the field values\n ns_resource_type_dict = _to_db_dict(\n namespace['id'], resource_type['id'], values)\n new_rec = _create_association(context, namespace_name, resource_type_name,\n ns_resource_type_dict, session)\n\n return _to_model_dict(resource_type_name, new_rec)", "def _create_model(self, key):\n pass", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def get_translation_for_object(self, lang, obj=None, model=None, object_id=None):\n\n # Gets object model and pk if informed\n if obj:\n model = type(obj)\n object_id = obj.pk\n\n cache_key = self.make_cache_key(model, object_id, lang)\n \n # Checks if there is a cached object for this\n from_cache = cache.get(cache_key, None)\n\n if from_cache:\n return from_cache\n\n # Gets the related content type\n c_type = ContentType.objects.get_for_model(model)\n\n # Gets the translation\n trans = self.get(language=lang, content_type=c_type, object_id=object_id)\n\n # Stores in cache\n cache.set(cache_key, trans)\n\n # Returns the translation object\n return trans", "def _db_field(self):\n type = RELATED_FIELD_MAP[self.type]\n attrs = {\n 'verbose_name': self.verbose_name,\n 'help_text': self.help_text,\n 'blank': not self.required,\n 'null': not self.required,\n 'unique': self.unique,\n 'related_name': self.reverse_name\n }\n\n # special handling for self referential relationships\n if self.related_model.name == self.name:\n return type('self', **attrs)\n else:\n try:\n return type(self.related_model.model, **attrs if self.type != GENERIC else {})\n except ValueError:\n # if related model has not been evaluated by django yet, we need to expose it, but need to be careful\n # of mutually recursive relationships between models creating runtime exceptions, so create a model\n # sans relations, create the relationship then contribute the other models relations\n model = self.related_model._create_deferred_relation_model()\n relation_field = type(self.related_model.name, **attrs if self.type != GENERIC else {})\n self.related_model._contribute_relations(model)\n return relation_field", "def _add_object(self, name, model, *args, **kwargs):\n logger.debug('Adding object with name \"{}\" to model.'.format(name))\n obj = model(weakref.proxy(self), name, *args, **kwargs) # Add hidden hard reference\n self._objects.append(obj)\n return self.get_object(obj.name)", "def MakeModel(self):\n pass", 
"def actstream_register_model(model):\n for field in ('actor', 'target', 'action_object'):\n generic.GenericRelation(Action,\n content_type_field='%s_content_type' % field,\n object_id_field='%s_object_id' % field,\n related_name='actions_with_%s_%s_as_%s' % (\n model._meta.app_label, model._meta.module_name, field),\n ).contribute_to_class(model, '%s_actions' % field)\n\n setattr(Action, 'actions_with_%s_%s_as_%s' % (model._meta.app_label, model._meta.module_name, field), None)", "def build_model():", "def _ensure_side_model_exists(self):\n # TODO used metaclass for more pythonic\n self.user.create_if_not_exists()\n # Project model need User object exists\n self.project.create_if_not_exists(self._user)", "def create(self, **kwargs):\n reverse_one_to_one_fields = frozenset(kwargs).intersection(\n self.model._meta._reverse_one_to_one_field_names\n )\n if reverse_one_to_one_fields:\n raise ValueError(\n \"The following fields do not exist in this model: %s\"\n % \", \".join(reverse_one_to_one_fields)\n )\n\n obj = self.model(**kwargs)\n self._for_write = True\n obj.save(force_insert=True, using=self.db)\n return obj", "def prepare(self, nlp_model_names, text, sender):\n if self.nlp_model is None:\n # Replace \"NLP_MODEL\" with the name of the NLP models which this module should use.\n self.nlp_model = spacy.load(nlp_model_names[\"NLP_MODEL\"])\n to, when, body = self.nlp(text)\n self.description = None\n return self.prepare_processed(to, when, body, sender)", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def relate(self, qs):\n model_map = {}\n item_map = {}\n for item in qs:\n object_id = getattr(item, self._object_id_field)\n content_type = getattr(item, self._content_type_field)\n model_map.setdefault(content_type, {}) \\\n [object_id] = item.id\n item_map[item.id] = item\n for ct, items_ in model_map.items():\n for o in ct.model_class().objects.select_related() \\\n .filter(id__in=items_.keys()).all():\n setattr(item_map[items_[o.id]],self._content_object_field, o)\n return qs", "def produce(namespace, type_name, members):\n\n return ComplexModelMeta(type_name, (ComplexModel,), odict({\n '__namespace__': namespace,\n '__type_name__': type_name,\n '_type_info': TypeInfo(members),\n }))", "def translations(self):\r\n return Translations(self)", "def translations(self):\r\n return Translations(self)", "def _addmodel(self, model: Model):\n model = copy.deepcopy(model)\n\n if self.domain is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(model, self.domain)\n\n # Add in model\n self.model = model\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.model = model", "def __init__(self, model, name = None):\n self._main_model = model.get_main_model()\n self._model = model\n self._name = name\n self._var_prefix = '_sub_' + str(SubModel._next_id)\n SubModel._next_id += 1\n if (name is None) or model.is_source_prefix_mangled():\n self._source_prefix_mangled = True\n self._source_prefix = self._var_prefix\n else:\n self._source_prefix_mangled = False\n self._source_prefix = model.get_source_prefix() + name + '.'", "def add_related_factories(self) -> None:\n for rel in get_model_relations(self.model, forward=False):\n if rel.many_to_many:\n continue\n # These are all reverse relations, meaning rel.model == self.model.\n factory_name = self._get_factory_name_for_model(rel.related_model)\n accessor_name = rel.get_accessor_name()\n if not hasattr(self.factory, rel.name):\n related_factory = 
RelatedFactory(\n factory=factory_name,\n factory_related_name=rel.field.name,\n accessor_name=accessor_name,\n related_model=rel.related_model\n )\n setattr(self.factory, rel.name, related_factory)", "def create(self, attributes=None, **kwargs):\n\n return super(LocalesProxy, self).create(None, attributes)", "def createNewModel(self, modelName):\n try:\n storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))\n\n folderSufix = 1\n new_model_name = modelName\n while storage.exists(join(storage.base_location, new_model_name)):\n folderSufix += 1\n new_model_name = f'{modelName}_{folderSufix}'\n\n folder_path = join(storage.base_location, new_model_name)\n model_file = join(folder_path, f'{new_model_name}.ppl')\n\n if not storage.exists(folder_path):\n os.mkdir(folder_path)\n\n calcEngine = CalcEngine.factory(self.client_session)\n if calcEngine.createNewModel(model_file, new_model_name):\n self.closeModel()\n return self.openModel(join(storage.base_location, new_model_name, f'{new_model_name}.ppl'))\n except Exception as ex:\n raise ex", "def create_mes(self, context, mes):\n mes_info = mes['mes']\n name = mes_info['name']\n mes_info['mes_mapping'] = dict()\n\n if mes_info.get('mesd_template'):\n mesd_name = utils.generate_resource_name(name, 'inline')\n mesd = {'mesd': {\n 'attributes': {'mesd': mes_info['mesd_template']},\n 'description': mes_info['description'],\n 'name': mesd_name,\n 'template_source': 'inline',\n 'tenant_id': mes_info['tenant_id']}}\n mes_info['mesd_id'] = self.create_mesd(context, mesd).get('id')\n\n mesd = self.get_mesd(context, mes['mes']['mesd_id'])\n mesd_dict = yaml.safe_load(mesd['attributes']['mesd'])\n meo_plugin = manager.ApmecManager.get_service_plugins()['MEO']\n\n region_name = mes.setdefault('placement_attr', {}).get(\n 'region_name', None)\n vim_res = self.vim_client.get_vim(context, mes['mes']['vim_id'],\n region_name)\n driver_type = vim_res['vim_type']\n if not mes['mes']['vim_id']:\n mes['mes']['vim_id'] = vim_res['vim_id']\n\n ##########################################\n # Detect MANO driver here:\n # Defined in the Tosca template\n nfv_driver = None\n if mesd_dict['imports'].get('nsds'):\n nfv_driver = mesd_dict['imports']['nsds']['nfv_driver']\n nfv_driver = nfv_driver.lower()\n if mesd_dict['imports'].get('vnffgds'):\n nfv_driver = mesd_dict['imports']['vnffgds']['nfv_driver']\n nfv_driver = nfv_driver.lower()\n\n ##########################################\n def _find_vnf_ins(cd_mes):\n al_ns_id_list = cd_mes['mes_mapping'].get('NS')\n if not al_ns_id_list:\n return None, None\n al_ns_id = al_ns_id_list[0]\n try:\n ns_instance = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'ns_get',\n ns_id=al_ns_id,\n auth_attr=vim_res['vim_auth'], )\n except:\n return None, None\n if ns_instance['status'] != 'ACTIVE':\n return None, None\n al_vnf = ns_instance['vnf_ids']\n al_vnf_dict = ast.literal_eval(al_vnf)\n return ns_instance['id'], al_vnf_dict\n\n def _generic_ns_set(req_vnf_list):\n al_mes_list = self.get_mess(context)\n ns_candidate = dict()\n for al_mes in al_mes_list:\n if al_mes['status'] != \"ACTIVE\":\n continue\n ns_candidate[al_mes['id']] = dict()\n al_ns_id, al_vnf_dict = _find_vnf_ins(al_mes)\n if not al_ns_id:\n continue\n ns_candidate[al_mes['id']] = dict()\n for req_vnf_dict in req_vnf_list:\n for vnf_name, al_vnf_id in al_vnf_dict.items():\n if req_vnf_dict['name'] == vnf_name:\n # Todo: remember to change this with VM capacity\n len_diff = len([lend for lend in al_mes['reused'][vnf_name] if lend > 0])\n 
avail = len_diff - req_vnf_dict['nf_ins']\n ns_candidate[al_mes['id']].update({vnf_name: avail})\n # The rest will be treated differently by the algorithms\n return ns_candidate\n\n def _run_meso_rsfca(req_vnf_list, ns_candidate=None):\n rsfca_is_accepted = False\n required_info_dict = dict()\n if not ns_candidate:\n ns_candidate = _generic_ns_set(req_nf_list)\n ns_cds = dict()\n deep_ns = dict()\n for ck_mes_id, ns_data_dict in ns_candidate.items():\n # This is the heart of the rvnfa\n if len(ns_data_dict) == len(req_vnf_list):\n nf_ins_list = [nf_ins for nf_name, nf_ins in ns_data_dict.items() if nf_ins >= 0]\n if len(nf_ins_list) == len(req_vnf_list):\n total_ins = sum(nf_ins_list)\n ns_cds[ck_mes_id] = total_ins\n else:\n extra_nf_ins_list = [-nf_ins for nf_name, nf_ins in ns_data_dict.items() if nf_ins < 0]\n total_ins = sum(extra_nf_ins_list)\n deep_ns[ck_mes_id] = total_ins\n if ns_cds:\n selected_mes1 = min(ns_cds, key=ns_cds.get)\n required_info_dict[selected_mes1] = ns_candidate[selected_mes1]\n rsfca_is_accepted = True\n if deep_ns:\n selected_mes2 = min(deep_ns, key=deep_ns.get)\n required_info_dict[selected_mes2] = ns_candidate[selected_mes2]\n rsfca_is_accepted = True\n\n return rsfca_is_accepted, required_info_dict\n\n def _run_meso_rvnfa(req_vnf_list):\n rvnfa_remain_list = list()\n rvnfa_is_accepted = False\n rvnfa_required_info = dict()\n final_candidate = dict() # Consider using the OrderDict\n ns_candidate = _generic_ns_set(req_vnf_list)\n if not ns_candidate:\n return rvnfa_is_accepted, None, req_vnf_list\n for req_vnf_dict in req_vnf_list:\n req_vnf_name = req_vnf_dict['name']\n candidate_set = list()\n for ck_mes_id, mes_data_dict in ns_candidate.items():\n if req_vnf_name in mes_data_dict:\n slots = mes_data_dict[req_vnf_name]\n candidate_set.append({'mes_id': ck_mes_id, 'slots': slots})\n exp_slot_list = [mes_candidate['slots'] for mes_candidate in candidate_set if mes_candidate['slots'] >= 0] # noqa\n if exp_slot_list:\n min_slot = min(exp_slot_list)\n mes_list =\\\n [mes_candidate['mes_id'] for mes_candidate in candidate_set if mes_candidate['slots'] == min_slot] # noqa\n final_candidate[req_vnf_name] = {'mes_id':mes_list[0], 'slots':min_slot}\n if len(final_candidate) == len(req_nf_list):\n rvnfa_is_accepted = True\n else:\n for remain_vnf_dict in req_nf_list:\n if remain_vnf_dict['name'] not in final_candidate:\n rvnfa_remain_list.append(remain_vnf_dict) # good one\n for req_vnf_name, mixed_mes_info in final_candidate:\n orig_mes_id = mixed_mes_info['mes_id']\n nf_slots = mixed_mes_info['slots']\n if orig_mes_id not in rvnfa_required_info:\n rvnfa_required_info[orig_mes_id] = dict()\n rvnfa_required_info[orig_mes_id].update({req_vnf_name: nf_slots})\n else:\n rvnfa_required_info[orig_mes_id].update({req_vnf_name: nf_slots})\n # return the list of NSs must be updated, when to create new mes?\n return rvnfa_is_accepted, rvnfa_required_info, rvnfa_remain_list\n\n def _run_meso_ha(req_vnf_list):\n final_candidate_id = None\n ha_required_info = dict()\n ha_remain_list = list()\n ha_is_accepted = False\n ns_candidate = _generic_ns_set(req_vnf_list)\n if not ns_candidate:\n return ha_is_accepted, None, req_nf_list\n ha_is_accepted, ha_mes_dict = _run_meso_rsfca(req_nf_list, ns_candidate)\n if ha_is_accepted:\n return ha_is_accepted, ha_mes_dict, None\n else:\n ns_list = list()\n for ck_mes_id, mes_data_dict in ns_candidate.items():\n lenNF = len(mes_data_dict)\n ns_list.append({'mes_id': ck_mes_id, 'numNFs': lenNF})\n maxNFs = max([ns_info['numNFs'] for ns_info in 
ns_list])\n first_filter_list = list()\n second_filter_list = list()\n for ns_info in ns_list:\n if ns_info['numNFs'] == maxNFs:\n mes_id = ns_info['mes_id']\n exp_NFs = [slots for exp_vnf_name, slots in ns_candidate[mes_id].items() if slots >= 0]\n unexp_NFs = [-slots for exp_vnf_name, slots in ns_candidate[mes_id].items() if slots < 0]\n if len(exp_NFs) == maxNFs:\n first_filter_list.append(\n {'mes_id': mes_id, 'slots': sum(exp_NFs)})\n else:\n second_filter_list.append(\n {'mes_id': mes_id, 'slots': sum(unexp_NFs)})\n if first_filter_list:\n exp_slot = min([exp_mes['slots'] for exp_mes in first_filter_list])\n exp_mes_list = [exp_mes['mes_id'] for exp_mes in first_filter_list if exp_mes['slots'] == exp_slot]\n final_candidate_id = exp_mes_list[0]\n if second_filter_list:\n unexp_slot = min([exp_mes['slots'] for exp_mes in second_filter_list])\n exp_mes_list = [exp_mes['mes_id'] for exp_mes in second_filter_list if\n exp_mes['slots'] == unexp_slot]\n final_candidate_id = exp_mes_list[0]\n\n if final_candidate_id:\n ha_required_info[final_candidate_id] = ns_candidate[final_candidate_id]\n rvnf_remain_list = [exp_vnf_dict for exp_vnf_dict in req_vnf_list if exp_vnf_dict['name'] not in ns_candidate[final_candidate_id]] # noqa\n vnf_is_accepted, rvnf_required_info, rvnf_remain_list = _run_meso_rvnfa(rvnf_remain_list)\n ha_remain_list.extend(rvnf_remain_list)\n ha_required_info.update(rvnf_required_info)\n # only return the mes_id and the mes_info need to update\n\n return ha_is_accepted, ha_required_info, ha_remain_list\n\n build_nsd_dict = dict()\n if mesd_dict['imports'].get('nsds'):\n # For framework evaluation\n nsd_template = mesd_dict['imports']['nsds']['nsd_templates']\n if isinstance(nsd_template, dict):\n if nsd_template.get('requirements'):\n req_nf_dict = nsd_template['requirements']\n req_nf_list = list()\n for vnf_dict in req_nf_dict:\n # Todo: make the requests more natural\n req_nf_list.append({'name': vnf_dict['name'], 'nf_ins': int(vnf_dict['vnfd_template'][5])})\n is_accepted, mes_info_dict = _run_meso_rsfca(req_nf_list)\n if is_accepted:\n update_list = list()\n for cd_mes_id, mes_dict in mes_info_dict.items():\n new_mesd_dict = dict()\n ref_mesd_dict = copy.deepcopy(mesd_dict)\n ref_mesd_dict['imports']['nsds']['nsd_templates']['requirements'] = mes_dict\n new_mesd_dict['mes'] = dict()\n new_mesd_dict['mes'] = {'mesd_template': yaml.safe_dump(ref_mesd_dict)}\n return self.update_mes(context,cd_mes_id, new_mesd_dict)\n # update_list.append(cd_mes_id)\n # return {}\n else:\n # Create the inline NS with the following template\n import_list = list()\n node_dict = dict()\n for vnfd in req_nf_dict:\n import_list.append(vnfd['vnfd_template'])\n node = 'tosca.nodes.nfv.' 
+ vnfd['name']\n node_dict[vnfd['name']] = {'type': node}\n build_nsd_dict['tosca_definitions_version'] = 'tosca_simple_profile_for_nfv_1_0_0'\n build_nsd_dict['description'] = mes_info['description']\n build_nsd_dict['imports'] = import_list\n build_nsd_dict['topology_template'] = dict()\n build_nsd_dict['topology_template']['node_templates'] = node_dict\n\n # Temp update the new NFs\n # For these cases, remember when to update the MEA\n\n ha_is_accepted, required_info, remain_list = _run_meso_ha(req_nf_list)\n rvnfa_is_accepted, required_info, remain_list = _run_meso_rvnfa(req_nf_list)\n if required_info:\n update_list = list()\n for cd_mes_id, mes_dict in required_info.items():\n new_mesd_dict = dict()\n ref_mesd_dict = copy.deepcopy(mesd_dict)\n ref_mesd_dict['imports']['nsds']['nsd_templates']['requirements'] = mes_dict\n new_mesd_dict['mes'] = dict()\n new_mesd_dict['mes'] = {'mesd_template': yaml.safe_dump(ref_mesd_dict)}\n self.update_mes(context, cd_mes_id, new_mesd_dict)\n update_list.append(cd_mes_id)\n if not remain_list:\n return {}\n if remain_list:\n # reform the vnf_dict\n vnf_info_tpl = list()\n for vnf_dict in remain_list:\n vnf_ins = vnf_dict['nf_ins']\n node_name = vnf_dict['name']\n vnfd_tpl = 'vnfd' + node_name[3] + str(vnf_ins)\n vnf_info_tpl.append({'name': node_name, 'vnfd_template': vnfd_tpl})\n import_list = list()\n node_dict = dict()\n for vnfd in vnf_info_tpl:\n import_list.append(vnfd['vnfd_template'])\n node = 'tosca.nodes.nfv.' + vnfd['name']\n node_dict[vnfd['name']] = {'type': node}\n build_nsd_dict['tosca_definitions_version'] = 'tosca_simple_profile_for_nfv_1_0_0'\n build_nsd_dict['description'] = mes_info['description']\n build_nsd_dict['imports'] = import_list\n build_nsd_dict['topology_template'] = dict()\n build_nsd_dict['topology_template']['node_templates'] = node_dict\n\n nsds = mesd['attributes'].get('nsds')\n mes_info['mes_mapping']['NS'] = list()\n if nsds:\n nsds_list = nsds.split('-')\n for nsd in nsds_list:\n ns_name = nsd + '-' + name + '-' + uuidutils.generate_uuid()\n nsd_instance = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'nsd_get_by_name',\n nsd_name=nsd,\n auth_attr=vim_res['vim_auth'],)\n if nsd_instance:\n ns_arg = {'ns': {'nsd_id': nsd_instance['id'], 'name': ns_name}}\n ns_id = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'ns_create',\n ns_dict=ns_arg,\n auth_attr=vim_res['vim_auth'], )\n mes_info['mes_mapping']['NS'].append(ns_id)\n if build_nsd_dict:\n ns_name = 'nsd' + name + '-' + uuidutils.generate_uuid()\n ns_arg = {'ns': {'nsd_template': build_nsd_dict, 'name': ns_name,\n 'description': mes_info['description'], 'vim_id': '',\n 'tenant_id': mes_info['tenant_id'], 'attributes': {}}}\n ns_id = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'ns_create',\n ns_dict=ns_arg,\n auth_attr=vim_res['vim_auth'], )\n mes_info['mes_mapping']['NS'].append(ns_id)\n\n vnffgds = mesd['attributes'].get('vnffgds')\n if mesd_dict['imports'].get('vnffgds'):\n vnffgds_list = vnffgds.split('-')\n mes_info['mes_mapping']['VNFFG'] = list()\n for vnffgd in vnffgds_list:\n vnffg_name = vnffgds + '-' + name + '-' + uuidutils.generate_uuid()\n vnffgd_instance = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'vnffgd_get_by_name',\n vnffgd_name=vnffgd,\n auth_attr=vim_res['vim_auth'], )\n if vnffgd_instance:\n vnffg_arg = {'vnffg': {'vnffgd_id': vnffgd_instance['id'], 'name': vnffg_name}}\n vnffg_id = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is 
Tacker\n 'vnffg_create',\n vnffg_dict=vnffg_arg,\n auth_attr=vim_res['vim_auth'], )\n mes_info['mes_mapping']['VNFFG'].append(vnffg_id)\n\n meca_id = dict()\n # Create MEAs using MEO APIs\n try:\n meca_name = 'meca' + '-' + name + '-' + uuidutils.generate_uuid()\n # Separate the imports out from template\n mead_tpl_dict = dict()\n mead_tpl_dict['imports'] = mesd_dict['imports']['meads']['mead_templates']\n mecad_dict = copy.deepcopy(mesd_dict)\n mecad_dict.pop('imports')\n mecad_dict.update(mead_tpl_dict)\n LOG.debug('mesd %s', mecad_dict)\n meca_arg = {'meca': {'mecad_template': mecad_dict, 'name': meca_name,\n 'description': mes_info['description'], 'tenant_id': mes_info['tenant_id'],\n 'vim_id': mes_info['vim_id'], 'attributes': {}}}\n meca_dict = meo_plugin.create_meca(context, meca_arg)\n mes_info['mes_mapping']['MECA'] = meca_dict['id']\n except Exception as e:\n LOG.error('Error while creating the MECAs: %s', e)\n # Call Tacker client driver\n\n mes_dict = super(MesoPlugin, self).create_mes(context, mes)\n\n def _create_mes_wait(self_obj, mes_id):\n args = dict()\n mes_status = \"ACTIVE\"\n ns_status = \"PENDING_CREATE\"\n vnffg_status = \"PENDING_CREATE\"\n mec_status = \"PENDING_CREATE\"\n ns_retries = NS_RETRIES\n mec_retries = MEC_RETRIES\n vnffg_retries = VNFFG_RETRIES\n mes_mapping = self.get_mes(context, mes_id)['mes_mapping']\n # Check MECA\n meca_id = mes_mapping['MECA']\n while mec_status == \"PENDING_CREATE\" and mec_retries > 0:\n time.sleep(MEC_RETRY_WAIT)\n mec_status = meo_plugin.get_meca(context, meca_id)['status']\n LOG.debug('status: %s', mec_status)\n if mec_status == 'ACTIVE' or mec_status == 'ERROR':\n break\n mec_retries = mec_retries - 1\n error_reason = None\n if mec_retries == 0 and mec_status == 'PENDING_CREATE':\n error_reason = _(\n \"MES creation is not completed within\"\n \" {wait} seconds as creation of MECA\").format(\n wait=MEC_RETRIES * MEC_RETRY_WAIT)\n # Check NS/VNFFG status\n if mes_mapping.get('NS'):\n ns_list = mes_mapping['NS']\n while ns_status == \"PENDING_CREATE\" and ns_retries > 0:\n time.sleep(NS_RETRY_WAIT)\n # Todo: support multiple NSs\n ns_instance = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'ns_get',\n ns_id=ns_list[0],\n auth_attr=vim_res['vim_auth'], )\n ns_status = ns_instance['status']\n LOG.debug('status: %s', ns_status)\n if ns_status == 'ACTIVE' or ns_status == 'ERROR':\n break\n ns_retries = ns_retries - 1\n error_reason = None\n if ns_retries == 0 and ns_status == 'PENDING_CREATE':\n error_reason = _(\n \"MES creation is not completed within\"\n \" {wait} seconds as creation of NS(s)\").format(\n wait=NS_RETRIES * NS_RETRY_WAIT)\n\n # Determine args\n ns_cd = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'ns_get',\n ns_id=ns_list[0],\n auth_attr=vim_res['vim_auth'], )\n ns_instance_dict = ns_cd['mgmt_urls']\n ns_instance_list = ast.literal_eval(ns_instance_dict)\n args['NS'] = dict()\n\n for vnf_name, mgmt_url_list in ns_instance_list.items():\n # Todo: remember to change this with VM capacity\n vm_capacity = VM_CAPA[vnf_name]\n orig = [vm_capacity] * len(mgmt_url_list)\n args['NS'][vnf_name] = [(val - 1) for val in orig]\n\n if mes_mapping.get('VNFFG'):\n while vnffg_status == \"PENDING_CREATE\" and vnffg_retries > 0:\n time.sleep(VNFFG_RETRY_WAIT)\n vnffg_list = mes_mapping['VNFFG']\n # Todo: support multiple VNFFGs\n vnffg_instance = self._nfv_drivers.invoke(\n nfv_driver, # How to tell it is Tacker\n 'vnffg_get',\n vnffg_id=vnffg_list[0],\n 
auth_attr=vim_res['vim_auth'], )\n vnffg_status = vnffg_instance['status']\n LOG.debug('status: %s', vnffg_status)\n if vnffg_status == 'ACTIVE' or vnffg_status == 'ERROR':\n break\n vnffg_retries = vnffg_retries - 1\n error_reason = None\n if vnffg_retries == 0 and vnffg_status == 'PENDING_CREATE':\n error_reason = _(\n \"MES creation is not completed within\"\n \" {wait} seconds as creation of VNFFG(s)\").format(\n wait=VNFFG_RETRIES * VNFFG_RETRY_WAIT)\n if mec_status == \"ERROR\" or ns_status == \"ERROR\" or vnffg_status == \"ERROR\":\n mes_status = \"ERROR\"\n if error_reason:\n mes_status = \"PENDING_CREATE\"\n\n super(MesoPlugin, self).create_mes_post(context, mes_id, mes_status, error_reason, args)\n self.spawn_n(_create_mes_wait, self, mes_dict['id'])\n return mes_dict", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create(self, validated_data):\n ModelClass = self.Meta.model\n instance = ModelClass()\n self.instance = instance\n for key, value in validated_data.items():\n setattr(instance, key, value)\n return super().create(validated_data)", "def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)", "def abstract_create(self, model, params):\n # we check that the given fields exist\n self.check_fields_existence(model, params.keys())\n\n # then we create the record after preparing params\n return self.env[model].sudo().create(self._prepare_params(params))", "def to_simple_model(self, instance, **options): # nolint\r\n options = self.init_options(**options)\r\n fields, include, exclude, related = options['fields'], options['include'], options['exclude'], options['related'] # nolint\r\n\r\n result = dict(\r\n model=smart_unicode(instance._meta),\r\n pk=smart_unicode(\r\n instance._get_pk_val(), strings_only=True),\r\n fields=dict(),\r\n )\r\n\r\n m2m_fields = [f.name for f in instance._meta.many_to_many]\r\n o2m_fields = [f.get_accessor_name()\r\n for f in instance._meta.get_all_related_objects()]\r\n default_fields = set([field.name for field in instance._meta.fields\r\n if field.serialize])\r\n serialized_fields = fields or (default_fields | include) - exclude\r\n\r\n for fname in serialized_fields:\r\n\r\n # Respect `to_simple__<fname>`\r\n to_simple = getattr(\r\n self.scheme, 'to_simple__{0}'.format(fname), None)\r\n\r\n if to_simple:\r\n result['fields'][fname] = to_simple(instance, serializer=self)\r\n continue\r\n\r\n related_options = related.get(fname, dict())\r\n if related_options:\r\n related_options = self.init_options(**related_options)\r\n\r\n if fname in default_fields and not related_options:\r\n field = instance._meta.get_field(fname)\r\n value = field.value_from_object(instance)\r\n\r\n else:\r\n value = getattr(instance, fname, None)\r\n if isinstance(value, Manager):\r\n value = value.all()\r\n\r\n result['fields'][fname] = self.to_simple(\r\n value, **related_options)\r\n\r\n if self.format != 'django':\r\n fields = result['fields']\r\n fields['id'] = result['pk']\r\n result = fields\r\n\r\n return result", "def build_model(self):\n pass", "def build_model(self):\n pass", "def to_model(cls, obj):\n\n new_model = cls()\n\n for key, value in obj.iteritems():\n if value:\n if key == 'transcripts':\n setattr(new_model, key, [ModelConverter.to_model(Transcript, t) for t in value])\n elif key == 'acts' and cls == Transcript:\n setattr(new_model, key, 
[ModelConverter.to_model(Act, a) for a in value])\n elif key == 'subtitles':\n setattr(new_model, key, [ModelConverter.to_model(Subtitle, s) for s in value])\n else:\n setattr(new_model, key, value)\n\n return new_model", "def _related_fields(self):\r\n model_class, m2m = self._get_model_class_from_table(self.model._meta.db_table) \r\n related_fields = {\r\n self.model._meta.pk.attname: model_class\r\n }\r\n for attname, model_class in self._get_related_models(self.model):\r\n related_fields[attname] = model_class\r\n return related_fields", "def add_model(self, model):\n name = '.'.join((model.__module__, model.__name__))\n\n if model.__name__ is not 'MongoModel':\n self._models[name] = model\n self._process_relations(model)\n\n if self._waited_relations:\n self._handle_waited_relations()", "def model_table(name, *fields, app_label='internal'):\n model = apps.get_model(app_label, name)\n items = model.objects.all().values_list(*fields)\n field_names = [model._meta.get_field(field).verbose_name\n for field in fields]\n return {'items': items, 'fields': field_names}", "def create_model(self):\n self.create_model_file()\n self.create_model_unit_test()\n self.add_model_to_list()\n self.readme_reminder()", "def make_reference(reference_path, reference_meta, model):\n # Make file dirs\n reference_dirs = list(reference_meta.keys())\n ids = []\n \n # Get file fullpath\n files = []\n import glob\n if DEBUG:\n for refdir in reference_dirs[:20]:\n foundfiles = glob.glob(os.path.join(reference_path, refdir, \"*jpg\"))\n files.extend(foundfiles)\n for _ in foundfiles:\n ids.append(refdir)\n else:\n for refdir in reference_dirs:\n foundfiles = glob.glob(os.path.join(reference_path, refdir, \"*jpg\"))\n files.extend(foundfiles)\n for _ in foundfiles:\n ids.append(refdir)\n dataset = buzaiDataset(files, transforms_val)\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=SequentialSampler(dataset), num_workers=num_workers)\n\n # Get embeddings\n embeddings = predict(loader, files, model)\n \n return embeddings, np.array(ids)", "def create(self, vals):\n context = dict(self.env.context)\n name = vals.get(\"name\", context.get(\"default_name\"))\n\n if name is not None:\n # Calculate the splitted fields\n inverted = self._get_inverse_name(\n self._get_whitespace_cleaned_name(name),\n vals.get(\"is_company\",\n self.default_get([\"is_company\"])[\"is_company\"]))\n\n for key, value in inverted.items():\n if not vals.get(key) or context.get(\"copy\"):\n vals[key] = value\n\n # Remove the combined fields\n if \"name\" in vals:\n del vals[\"name\"]\n if \"default_name\" in context:\n del context[\"default_name\"]\n\n result = super(Members, self.with_context(context)).create(vals)\n result._add_role_followers()\n return result", "def __init__(self,\n name: str,\n meta_models: Mapping[str, MetaModel],\n evaluation_field_name: str) -> None:\n self.meta_models = meta_models\n self.evaluation_field_name = evaluation_field_name\n super().__init__(name, None, None)", "def register_vo_model(cls, to_register=None, *, name: Optional[str] = None):\n return cls._register_impl(\"vo_model\", to_register, name,)", "def _execute(self):\n return self.model_cls.objects.create_model(**self.request_obj.data_params)", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def __init__(self, model_names):\n for name in model_names:\n model = spacy.load(name)\n self.pool[name] = SharedModel(name, model)\n log.debug(\"Initialized shared models in pool\")", "def new(cls, args, src_meta, trg_meta, 
waitk_lagging, name=None):\n # build source and target modality\n src_modality, trg_modality = cls.build_modalities(args, src_meta, trg_meta)\n encoder_params, decoder_params = {}, {}\n for f in cls.class_or_method_args():\n if f.name in args:\n if f.name.startswith(\"encoder.\"):\n encoder_params[f.name[8:]] = args[f.name]\n elif f.name.startswith(\"decoder.\"):\n decoder_params[f.name[8:]] = args[f.name]\n # build encoder and decoder\n encoder = build_encoder({\n \"encoder.class\": \"TransformerEncoder\",\n \"encoder.params\": encoder_params})\n decoder = build_decoder({\n \"decoder.class\": \"TransformerDecoder\",\n \"decoder.params\": decoder_params})\n model = cls(args, src_meta, trg_meta, src_modality, trg_modality,\n encoder, decoder, name=name)\n model.wait_k = waitk_lagging\n _ = model({\"src\": tf.convert_to_tensor([[1, 2, 3]], tf.int64),\n \"src_padding\": tf.convert_to_tensor([[0, 0., 0]], tf.float32),\n \"trg_input\": tf.convert_to_tensor([[1, 2, 3]], tf.int64)})\n return model", "def createObject(self, *args):\n return _libsbml.Submodel_createObject(self, *args)", "def test_create_without_translation(self):\n x = NotRequiredModel()\n\n self.assertNumQueries(1, lambda: x.save()) # only master object created\n self.assertEqual(sorted(x.get_available_languages()), [])", "def create_new_morpheme_language_model(data):\n morpheme_language_model = MorphemeLanguageModel(\n parent_directory = h.get_OLD_directory_path('morphemelanguagemodels', config=config),\n rare_delimiter = h.rare_delimiter,\n start_symbol = h.lm_start,\n end_symbol = h.lm_end,\n morpheme_delimiters = h.get_morpheme_delimiters(type_=u'unicode'),\n UUID = unicode(uuid4()),\n name = h.normalize(data['name']),\n description = h.normalize(data['description']),\n enterer = session['user'],\n modifier = session['user'],\n datetime_modified = h.now(),\n datetime_entered = h.now(),\n vocabulary_morphology = data['vocabulary_morphology'],\n corpus = data['corpus'],\n toolkit = data['toolkit'],\n order = data['order'],\n smoothing = data['smoothing'],\n categorial = data['categorial']\n )\n return morpheme_language_model", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def relation(self, related=None, group=None):\n\t\tif not related:\n\t\t\treturn None\n\n\t\t# Try to get parent model for multi-table models\n\t\tif hasattr(related, 'parent_model'):\n\t\t\trelated_content_type = ContentType.objects.get_for_model(related.parent_model)\n\t\telse:\n\t\t\trelated_content_type = ContentType.objects.get_for_model(type(related))\n\n\t\targs = {\n\t\t\t'content_type': ContentType.objects.get_for_model(type(self)),\n\t\t\t'object_id': self.pk,\n\t\t\t'related_object_id': related.pk,\n\t\t\t'related_content_type': related_content_type,\n\t\t}\n\n\t\tif group:\n\t\t\targs.update({'group': group})\n\n\t\tfrom .models import Related\n\t\treturn Related.objects.get(**args)", "def build_model(cls, args, task):\n # from fairseq.tasks.multilingual_translation import MultilingualTranslationTask\n # assert isinstance(task, MultilingualTranslationTask)\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if args.share_encoders:\n args.share_encoder_embeddings = True\n\n ### nat model\n # build shared embeddings (if applicable)\n src_dict, tgt_dict = task.source_dictionary, 
task.target_dictionary\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\"--share-all-embeddings requires a joined dictionary\")\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n \"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim\"\n )\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path\n ):\n raise ValueError(\n \"--share-all-embeddings not compatible with --decoder-embed-path\"\n )\n encoder_embed_tokens = TransformerModel.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = TransformerModel.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = TransformerModel.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n\n\n student_cls = ARCH_MODEL_REGISTRY[args.student_arch]\n encoder = student_cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = student_cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n student = student_cls(args,encoder,decoder)\n\n teacher_cls = ARCH_MODEL_REGISTRY[args.teacher_arch]\n if not issubclass(teacher_cls, NATransformerModel):\n teacher_cls = PatchedTransformerModel\n\n teacher_encoder = teacher_cls.build_encoder(\n args, src_dict,\n encoder_embed_tokens if args.share_encoder_embeddings else TransformerModel.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n )\n teacher_decoder = teacher_cls.build_decoder(\n args, tgt_dict,\n decoder_embed_tokens if args.share_decoder_embeddings else TransformerModel.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n )\n teacher = teacher_cls(args,teacher_encoder,teacher_decoder)\n\n return cls(args, student, teacher)", "def build_model(name, **model_params):\n assert name in globals().keys(),\\\n \"%s must be a model imported/defined in models/__init__.py\" % name\n return globals()[name](**model_params)", "def create(self):\n try:\n schema = MorphemeLanguageModelSchema()\n values = json.loads(unicode(request.body, request.charset))\n data = schema.to_python(values)\n lm = create_new_morpheme_language_model(data)\n Session.add(lm)\n Session.commit()\n lm.make_directory_safely(lm.directory)\n return lm\n except h.JSONDecodeError:\n response.status_int = 400\n return h.JSONDecodeErrorResponse\n except Invalid, e:\n response.status_int = 400\n return {'errors': e.unpack_errors()}", "def CreateModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_create_with_default_attributes(self):\n\n x = NotRequiredModel()\n x.tr_title = \"DEFAULT_TRANS_TITLE\"\n\n self.assertNumQueries(2, lambda: x.save()) # master and translation object created\n self.assertEqual(sorted(x.get_available_languages()), [self.conf_fallback])" ]
[ "0.7985823", "0.5591691", "0.49817428", "0.4975373", "0.4800113", "0.47464964", "0.4744232", "0.47395378", "0.47331527", "0.46989104", "0.46264884", "0.46264884", "0.459076", "0.45905438", "0.45603356", "0.4508293", "0.44997284", "0.44252264", "0.44179207", "0.4414165", "0.43915254", "0.4362893", "0.43542066", "0.43370935", "0.43317", "0.4326959", "0.43257198", "0.43151563", "0.42995852", "0.4281683", "0.42574745", "0.42555782", "0.42522705", "0.42377728", "0.42311996", "0.42266384", "0.42208588", "0.42044345", "0.4200406", "0.4195678", "0.4187495", "0.41842973", "0.4182652", "0.417641", "0.41641405", "0.41635406", "0.41593412", "0.41497415", "0.41338795", "0.41304627", "0.41297075", "0.4117998", "0.41088688", "0.41044727", "0.4087679", "0.4085747", "0.40850672", "0.4083723", "0.40767527", "0.40742543", "0.40717703", "0.4071316", "0.4071316", "0.4069415", "0.40691924", "0.40690523", "0.4062157", "0.40609255", "0.40570757", "0.4053193", "0.40520298", "0.4047658", "0.4045467", "0.40415528", "0.40405422", "0.40405422", "0.40353677", "0.40308827", "0.40300694", "0.40285102", "0.4027925", "0.40273622", "0.40073282", "0.40004224", "0.39938378", "0.398165", "0.3972927", "0.39701474", "0.3969889", "0.39657086", "0.39618576", "0.3957365", "0.39488292", "0.3945111", "0.39385164", "0.39343828", "0.39335382", "0.39322355", "0.3928167", "0.3916204" ]
0.822475
0
Contribute translations options to the inner Meta class and set the descriptors. This gets called from TranslateableModelBase.__new__
def contribute_translations(cls, rel): opts = cls._meta opts.translations_accessor = rel.get_accessor_name() opts.translations_model = rel.model opts.translations_cache = '%s_cache' % rel.get_accessor_name() trans_opts = opts.translations_model._meta # Set descriptors for field in trans_opts.fields: if field.name == 'pk': continue if field.name == 'master': continue if field.name == opts.translations_model._meta.pk.name: continue if field.name == 'language_code': attr = LanguageCodeAttribute(opts) else: attr = TranslatedAttribute(opts, field.name) setattr(cls, field.name, attr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_translations_model(model, related_name, meta, **fields):\n if not meta:\n meta = {}\n unique = [('language_code', 'master')]\n meta['unique_together'] = list(meta.get('unique_together', [])) + unique\n # Create inner Meta class \n Meta = type('Meta', (object,), meta)\n name = '%sTranslation' % model.__name__\n attrs = {}\n attrs.update(fields)\n attrs['Meta'] = Meta\n attrs['__module__'] = model.__module__\n attrs['language_code'] = models.CharField(max_length=15, db_index=True)\n # null=True is so we can prevent cascade deletion\n attrs['master'] = models.ForeignKey(model, related_name=related_name, editable=False, null=True)\n # Create and return the new model\n return ModelBase(name, (BaseTranslationModel,), attrs)", "def create_translations_model(shared_model, related_name, meta, **fields):\n if not meta:\n meta = {}\n\n if shared_model._meta.abstract:\n # This can't be done, because `master = ForeignKey(shared_model)` would fail.\n raise TypeError(\"Can't create TranslatedFieldsModel for abstract class {0}\".format(shared_model.__name__))\n\n # Define inner Meta class\n meta['app_label'] = shared_model._meta.app_label\n meta['db_tablespace'] = shared_model._meta.db_tablespace\n meta['managed'] = shared_model._meta.managed\n meta['unique_together'] = list(meta.get('unique_together', []))\n meta.setdefault('db_table', '{0}_translation'.format(shared_model._meta.db_table))\n meta.setdefault('verbose_name', _lazy_verbose_name(shared_model))\n\n # Avoid creating permissions for the translated model, these are not used at all.\n # This also avoids creating lengthy permission names above 50 chars.\n if django.VERSION >= (1,7):\n meta.setdefault('default_permissions', ())\n\n # Define attributes for translation table\n name = str('{0}Translation'.format(shared_model.__name__)) # makes it bytes, for type()\n\n attrs = {}\n attrs.update(fields)\n attrs['Meta'] = type(str('Meta'), (object,), meta)\n attrs['__module__'] = shared_model.__module__\n attrs['objects'] = models.Manager()\n attrs['master'] = models.ForeignKey(shared_model, related_name=related_name, editable=False, null=True)\n\n # Create and return the new model\n translations_model = TranslatedFieldsModelBase(name, (TranslatedFieldsModel,), attrs)\n\n # Register it as a global in the shared model's module.\n # This is needed so that Translation model instances, and objects which refer to them, can be properly pickled and unpickled.\n # The Django session and caching frameworks, in particular, depend on this behaviour.\n mod = sys.modules[shared_model.__module__]\n setattr(mod, name, translations_model)\n\n return translations_model", "def translate(self):\n raise NotImplementedError('subclass must override this method')", "def translations(self):\r\n return Translations(self)", "def translations(self):\r\n return Translations(self)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n for field in self.fields:\n self.fields[field].label = False", "def __init__(self, language=None):\n self.language = language\n self.translations = {}", "def __init__(self,\r\n primary_language=None,\r\n secondary_language=None,\r\n xml_signature=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.primary_language = primary_language\r\n self.secondary_language = secondary_language\r\n self.xml_signature = xml_signature\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def __init__(self, translate_language=None, 
translate_display=None, translate_attribution=None, translate_caching=None, translate_smart_rendering=None, translate_caching_duration=None, translate_session_save_interval=None, translate_session_save_batch_limit=None): # noqa: E501 # noqa: E501\n\n self._translate_language = None\n self._translate_display = None\n self._translate_attribution = None\n self._translate_caching = None\n self._translate_smart_rendering = None\n self._translate_caching_duration = None\n self._translate_session_save_interval = None\n self._translate_session_save_batch_limit = None\n self.discriminator = None\n\n if translate_language is not None:\n self.translate_language = translate_language\n if translate_display is not None:\n self.translate_display = translate_display\n if translate_attribution is not None:\n self.translate_attribution = translate_attribution\n if translate_caching is not None:\n self.translate_caching = translate_caching\n if translate_smart_rendering is not None:\n self.translate_smart_rendering = translate_smart_rendering\n if translate_caching_duration is not None:\n self.translate_caching_duration = translate_caching_duration\n if translate_session_save_interval is not None:\n self.translate_session_save_interval = translate_session_save_interval\n if translate_session_save_batch_limit is not None:\n self.translate_session_save_batch_limit = translate_session_save_batch_limit", "def get_translated_fields(self):\n for field in self.model._meta.get_fields():\n if isinstance(field, TranslatedVirtualField):\n yield field", "def __init__(self, model, options):\n self.model = model\n\n self.index_name = getattr(options, \"index_name\",\n self.model._meta.app_label.lower())\n self.doctype_name = getattr(options, \"doctype_name\",\n self.model._meta.object_name.lower())\n\n # initialize mapping for instance\n self._mapping = {\"properties\": {}}\n\n self.id_field = getattr(options, \"id_field\", \"pk\")\n self.excluded_fields = getattr(options, \"excluded_fields\", tuple())\n # mapping of custom fields used in elasticsearch but not in the django db and\n # their related django style fields. 
See `mapping_utils.FIELD_MAPPING`\n self.custom_fields = getattr(options, \"custom_fields\", dict())\n\n # mapping of additional options such as \"index\" \"analyze\" \"store\" etc...\n self.additional_options = getattr(options, \"additional_options\", dict())\n\n _fields = getattr(options, \"fields\", [])\n _fields.extend(self.custom_fields.keys())\n self.fields = self._get_fields(_fields, self.excluded_fields)\n\n self.id_field_type = getattr(options, \"id_field_type\", \"long\")\n self.fields.append(mapping.SearchField(\"id\", self.id_field_type))\n\n # get the serializer class\n self.serializer_class = getattr(options, \"serializer_class\",\n serializers.ModelJSONSerializer)", "def set_name_translation(self):\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc(\"Translation\", current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"Translation\",\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or \"en\",\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value(\"Translation\", current.name, \"translated_text\", self.label)\n\t\t\tfrappe.translate.clear_cache()", "def get_translation(self):", "def editable_metadata_fields(self):\r\n def jsonify_value(field, json_choice):\r\n if isinstance(json_choice, dict):\r\n json_choice = dict(json_choice) # make a copy so below doesn't change the original\r\n if 'display_name' in json_choice:\r\n json_choice['display_name'] = get_text(json_choice['display_name'])\r\n if 'value' in json_choice:\r\n json_choice['value'] = field.to_json(json_choice['value'])\r\n else:\r\n json_choice = field.to_json(json_choice)\r\n return json_choice\r\n\r\n def get_text(value):\r\n \"\"\"Localize a text value that might be None.\"\"\"\r\n if value is None:\r\n return None\r\n else:\r\n return self.runtime.service(self, \"i18n\").ugettext(value)\r\n\r\n metadata_fields = {}\r\n\r\n # Only use the fields from this class, not mixins\r\n fields = getattr(self, 'unmixed_class', self.__class__).fields\r\n\r\n for field in fields.values():\r\n\r\n if field.scope != Scope.settings or field in self.non_editable_metadata_fields:\r\n continue\r\n\r\n # gets the 'default_value' and 'explicitly_set' attrs\r\n metadata_fields[field.name] = self.runtime.get_field_provenance(self, field)\r\n metadata_fields[field.name]['field_name'] = field.name\r\n metadata_fields[field.name]['display_name'] = get_text(field.display_name)\r\n metadata_fields[field.name]['help'] = get_text(field.help)\r\n metadata_fields[field.name]['value'] = field.read_json(self)\r\n\r\n # We support the following editors:\r\n # 1. A select editor for fields with a list of possible values (includes Booleans).\r\n # 2. Number editors for integers and floats.\r\n # 3. 
A generic string editor for anything else (editing JSON representation of the value).\r\n editor_type = \"Generic\"\r\n values = field.values\r\n if isinstance(values, (tuple, list)) and len(values) > 0:\r\n editor_type = \"Select\"\r\n values = [jsonify_value(field, json_choice) for json_choice in values]\r\n elif isinstance(field, Integer):\r\n editor_type = \"Integer\"\r\n elif isinstance(field, Float):\r\n editor_type = \"Float\"\r\n elif isinstance(field, List):\r\n editor_type = \"List\"\r\n elif isinstance(field, Dict):\r\n editor_type = \"Dict\"\r\n elif isinstance(field, RelativeTime):\r\n editor_type = \"RelativeTime\"\r\n metadata_fields[field.name]['type'] = editor_type\r\n metadata_fields[field.name]['options'] = [] if values is None else values\r\n\r\n return metadata_fields", "def translate_as(self, lang):\n trans = PublicationLocalization.objects.filter(publication=self,\n language=lang,\n is_active=True).first()\n if trans:\n self.title = trans.title\n self.subheading = trans.subheading\n self.content = trans.content", "def translate(self):\n pass", "def _add_meta(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def add_translations(self, translations):\n for translation in translations:\n self.add_field_translation(translation)", "def __init__(self):\n super().__init__(interface.Metadata, DEFAULT_PRIORITIES)", "def __init__(self, language=config[\"default_language\"],\n lowercasing=config[\"lowercasing\"],\n path=None, resource=\"associations\"):\n super(AssociationDictionary, self).__init__(language=language,\n lowercasing=lowercasing,\n path=path, resource=resource)", "def meta(self, meta):\n\n self._meta = meta", "def meta(self, meta):\n\n self._meta = meta", "def customize(cls, **kwargs):\n\n store_as = apply_pssm(kwargs.get('store_as', None), PSSM_VALUES)\n if store_as is not None:\n kwargs['store_as'] = store_as\n\n cls_name, cls_bases, cls_dict = cls._s_customize(cls, **kwargs)\n cls_dict['__module__'] = cls.__module__\n\n retval = type(cls_name, cls_bases, cls_dict)\n retval._type_info = TypeInfo(cls._type_info)\n retval.__type_name__ = cls.__type_name__\n retval.__namespace__ = cls.__namespace__\n retval.Attributes.parent_variant = cls\n\n dca = retval.Attributes._delayed_child_attrs\n if retval.Attributes._delayed_child_attrs is None:\n retval.Attributes._delayed_child_attrs = {}\n else:\n retval.Attributes._delayed_child_attrs = dict(dca.items())\n\n child_attrs = kwargs.get('child_attrs', None)\n if child_attrs is not None:\n ti = retval._type_info\n for k, v in child_attrs.items():\n if k in ti:\n ti[k] = ti[k].customize(**v)\n else:\n retval.Attributes._delayed_child_attrs[k] = v\n\n tn = kwargs.get(\"type_name\", None)\n if tn is not None:\n retval.__type_name__ = tn\n\n ns = kwargs.get(\"namespace\", None)\n if ns is not None:\n retval.__namespace__ = ns\n\n if not cls is ComplexModel:\n cls._process_variants(retval)\n\n # we could be smarter, but customize is supposed to be called only while\n # daemon initialization, so it's not really necessary.\n ComplexModelBase.get_subclasses.memo.clear()\n ComplexModelBase.get_flat_type_info.memo.clear()\n ComplexModelBase.get_simple_type_info.memo.clear()\n\n return retval", "def extended_object(self):\n return create_i18n_page(\n title=self.title,\n languages=self.languages,\n template=self.template,\n in_navigation=self.in_navigation,\n parent=self.parent,\n )", "def __init__(self, model, **kwargs):\n\n super().__init__(model)\n\n self._ut = UnscentedTransform(model, **kwargs)", "def 
_set_attributes(self):", "def __init__(self, datastoreio_stub, label=None):\n super(PutModels, self).__init__(label=label)\n self.datastoreio = datastoreio_stub", "def contribute_to_class(cls, model_class=TaskResult):\n for field in dir(cls):\n if not field.startswith(\"_\") and field not in ('contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n\n # manually add Meta afterwards\n setattr(model_class._meta, 'indexes', getattr(model_class._meta, 'indexes', []) + cls.Meta.indexes)", "def __new__(cls, *args, **kwargs):\n ores = super(BaseDataObject, cls).__new__(cls)\n if cls.context is not None:\n ores.context = cls.context\n ores.add_contextualization(cls.context, ores)\n res = ores\n else:\n ores.context = None\n res = ores\n\n return res", "def translate(self, language=None):", "def __attrs_post_init__(self):", "def meta(self):\n raise NotImplementedError", "def setUp(self):\n super().setUp()\n described_model_kwargs = {\"name\": \"Name\", \"description\": \"Description\"}\n self.mapped_model = MappedModel[DescribedModel].parse_obj({\"described_model_type\": described_model_kwargs})\n self.expected_described_model = DescribedModel(**described_model_kwargs)", "def create(self, attributes=None, **kwargs):\n\n return super(LocalesProxy, self).create(None, attributes)", "def setUp(self):\n super().setUp()\n translation.activate(\"en-us\")", "def _build_model_internal(self, opts):\n assert False, 'VAE base class has no build_model method defined.'", "def translated_fields(model):\n\n options = translator.get_options_for_model(model)\n fields = [f.name for l in options.fields.values() for f in l]\n\n for i, f in enumerate(fields):\n if f.endswith(settings.MODELTRANSLATION_DEFAULT_LANGUAGE):\n del fields[i]\n\n return fields", "def __init__(self, *args, **kwargs):\n super(AppswellSimpleModelForm, self).__init__(*args, **kwargs)\n\n # override labels\n self.fields['message'].label = 'log message'", "def kotti_configure(settings):\n from .sqla import attach_language_independent_fields\n\n settings['pyramid.includes'] += ' kotti_multilingual'\n\n settings['kotti.available_types'] += \\\n ' kotti_multilingual.resources.LanguageRoot'\n\n Content.type_info.edit_links.append(\n LinkRenderer(\n name='translation-dropdown',\n predicate=translation_predicate,\n )\n )\n Document.type_info.addable_to.append('LanguageRoot')\n File.type_info.addable_to.append('LanguageRoot')\n Image.type_info.addable_to.append('LanguageRoot')\n\n File.type_info.language_independent_fields = ('data',)\n Image.type_info.language_independent_fields = ('data',)\n\n event.listen(\n mapper, 'mapper_configured', attach_language_independent_fields)", "def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n nested_field: NestedField = self.fields[self.nested]\n if not isinstance(nested_field, NestedField):\n raise TypeError(\n f'The field \"{self.nested}\" must be a NestedField instance, not \"{nested_field}\".')\n if nested_field.many:\n raise ValueError(f'The field \"{self.nested}\" can not be set as \"many=True\".')\n self.nested_field = nested_field\n # create partial methods\n self._do_dump = partial(\n getattr(self, self.dump_method),\n target=nested_field.dump_target,\n method=nested_field.dump,\n )\n self._do_load = partial(\n getattr(self, self.load_method),\n target=nested_field.load_target,\n method=nested_field.load,\n )", "def translations(self, **kwargs):\n\n path = self._get_movie_id_path('translations')\n resp = self._get_method(path, kwargs)\n return resp", "def 
set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def setup(self, **kwargs):\n\n for k, v in kwargs.items():\n setattr(self, k, v)", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)", "def _get_studio_action_translations(self, model, **kwargs):\n domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]\n\n # search view + its inheritancies\n views = request.env['ir.ui.view'].search([('model', '=', model.model)])\n domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain\n\n def make_domain(fld, rec):\n name = \"%s,%s\" % (fld.model_name, fld.name)\n return ['&', ('res_id', '=', rec.id), ('name', '=', name)]\n\n def insert_missing(fld, rec):\n if not fld.translate:\n return []\n\n if fld.related:\n try:\n # traverse related fields up to their data source\n while fld.related:\n rec, fld = fld.traverse_related(rec)\n if rec:\n return ['|'] + domain + make_domain(fld, rec)\n except AccessError:\n return []\n\n assert fld.translate and rec._name == fld.model_name\n request.env['ir.translation'].insert_missing(fld, rec)\n return []\n\n # insert missing translations of views\n for view in views:\n for name, fld in view._fields.items():\n domain += insert_missing(fld, view)\n\n # insert missing translations of model, and extend domain for related fields\n record = request.env[model.model].search([], limit=1)\n if record:\n for name, fld in record._fields.items():\n domain += insert_missing(fld, record)\n\n action = {\n 'name': _('Translate view'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'ir.translation',\n 'view_mode': 'tree',\n 'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],\n 'target': 'current',\n 'domain': domain,\n }\n\n return action", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id = None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n 
self._associated_objects = None", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def __init__(self, english_speaking: int=None, non_english_speaking: int=None): # noqa: E501\n self.swagger_types = {\n 'english_speaking': int,\n 'non_english_speaking': int\n }\n\n self.attribute_map = {\n 'english_speaking': 'english_speaking',\n 'non_english_speaking': 'non_english_speaking'\n }\n self._english_speaking = english_speaking\n self._non_english_speaking = non_english_speaking", "def __init__(self, *args, **kwargs):\n super(ModifyModelAdmin, self).__init__(*args, **kwargs)\n\n self.inline_instances_formset = []\n self.formset_pages = {}\n for item in self.inlines_formsets_pages:\n self.formset_pages[item[1]] = {\"title\" : item[0], \"instances\" : []}\n for inline_class in item[2][\"models\"]:\n inline_instance = inline_class(self.model, self.admin_site)\n self.formset_pages[item[1]][\"instances\"].append(inline_instance)\n # self.inline_instances_formset.append(inline_instance)", "def save_translations(cls, instance, **kwargs):\n opts = cls._meta\n if hasattr(instance, opts.translations_cache):\n trans = getattr(instance, opts.translations_cache)\n if not trans.master_id:\n trans.master = instance\n trans.save()", "def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )", "def translate(self):\n\t\tself._translate(True)", "def __init__(self, *, language_pair=(None, None), **kwargs):\n name = \"%s_to_%s\" % (language_pair[0].replace(\"_\", \"\"), language_pair[1])\n\n description = \"Translation dataset from %s to %s in plain text.\" % (\n language_pair[0],\n language_pair[1],\n )\n super(TedHrlrConfig, self).__init__(\n name=name, description=description, **kwargs\n )\n\n # Validate language pair.\n assert language_pair in _VALID_LANGUAGE_PAIRS, (\n \"Config language pair (%s, %s) not supported\" % language_pair\n )\n\n self.language_pair = language_pair", "def set_required_for_language(form_class):\n\n css_classname = 'required-for-language'\n\n fields = form_class._meta.model.get_required_translatable_fields()\n for name, (code, _) in itertools.product(fields, settings.LANGUAGES):\n field_name = build_localized_fieldname(name, lang=code)\n field = form_class.base_fields[field_name]\n if field.required is False:\n attrs = field.widget.attrs\n attrs['required_for_language'] = True\n attrs['class'] = attrs.get('class', '') + ' ' + css_classname", "def set_additional_fields(cls, model, data):\n for k, v in data.items():\n if not hasattr(model, k):\n setattr(model, k, v)", "def setup_transformProperties(self):\n for method in self.render_methods:\n curr = getattr(self, 'transform_%s' % method, '')\n try:\n self._delProperty('transform_%s' % method)\n except ValueError:\n pass\n try:\n delattr(self, 'transform_%s' % method)\n except:\n pass\n setattr(self, 'transform_%s' % method, curr)\n\n properties = list(self._properties)\n for method in self.render_methods:\n properties.append({'id': 'transform_%s' % method,\n 'type': 'selection',\n 'select_variable': 'get_templateCandidates',\n 'mode': 'w'})\n \n self._properties = tuple(properties)", "def __init__(self, columns_to_trans='all', trans_flag=True):\n self.columns_to_trans = columns_to_trans\n self.trans_flag = trans_flag", "def __init__(cls, name, bases, dct):\n if bases != (object,):\n for v in 
dct.values():\n if isinstance(v, _DefinitionClass) and v is not Message:\n v._message_definition = weakref.ref(cls)\n\n for field in cls.all_fields():\n field._message_definition = weakref.ref(cls)\n\n _DefinitionClass.__init__(cls, name, bases, dct)", "def test_create_with_default_attributes(self):\n\n x = NotRequiredModel()\n x.tr_title = \"DEFAULT_TRANS_TITLE\"\n\n self.assertNumQueries(2, lambda: x.save()) # master and translation object created\n self.assertEqual(sorted(x.get_available_languages()), [self.conf_fallback])", "def translations_en_text(self, translations_en_text):\n\n self._translations_en_text = translations_en_text", "def add_meta(self, post, *args, **kwargs):\n\t\tsuper(Command, self).add_meta(post, *args, **kwargs)\n\t\tpost.gen_description = False\n\t\tpost.description = description_from_content(post)\n\t\tpost.save()", "def __init__(self,\r\n setup=None,\r\n merge_fields=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.setup = setup\r\n self.merge_fields = merge_fields\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def use_en(self):\n pass", "def __init__(self, plugin, translator, parent=None):\n super(TranslatorWidget, self).__init__(parent)\n self.setupUi(self)\n \n self.__plugin = plugin\n self.__translator = translator\n \n self.__languages = TranslatorLanguagesDb(self)\n \n self.__translatorRequest = None\n self.__translationEngine = None\n \n self.__mediaPlayer = None\n self.__mediaFile = None\n \n audioAvailable = (MULTIMEDIA_AVAILABLE and\n bool(QMediaPlayer.hasSupport(\"audio/mpeg\")))\n self.pronounceOrigButton.setVisible(audioAvailable)\n self.pronounceTransButton.setVisible(audioAvailable)\n \n self.pronounceOrigButton.setIcon(\n self.__translator.getAppIcon(\"pronounce.png\"))\n self.pronounceTransButton.setIcon(\n self.__translator.getAppIcon(\"pronounce.png\"))\n self.swapButton.setIcon(\n self.__translator.getAppIcon(\"swap.png\"))\n self.translateButton.setIcon(\n self.__translator.getAppIcon(\"translate.png\"))\n self.clearButton.setIcon(UI.PixmapCache.getIcon(\"editDelete.png\"))\n self.preferencesButton.setIcon(UI.PixmapCache.getIcon(\"configure.png\"))\n \n self.translateButton.setEnabled(False)\n self.clearButton.setEnabled(False)\n self.pronounceOrigButton.setEnabled(False)\n self.pronounceTransButton.setEnabled(False)\n \n selectedEngine = self.__plugin.getPreferences(\"SelectedEngine\")\n \n self.__updateEngines()\n engineIndex = self.engineComboBox.findData(selectedEngine)\n self.engineComboBox.setCurrentIndex(engineIndex)\n self.__engineComboBoxCurrentIndexChanged(engineIndex)\n \n self.engineComboBox.currentIndexChanged.connect(\n self.__engineComboBoxCurrentIndexChanged)\n self.__plugin.updateLanguages.connect(self.__updateLanguages)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 
'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def _build_model(self, **kwargs):\n pass", "def test_get_translation_resources(self):\n pass", "def prepare_model(self, **kwargs):\n pass", "def field_choices_used_to_translated_value():\r\n LANGUAGES = (\r\n ('en', 'English'),\r\n ('ru', 'Russian'),\r\n )\r\n\r\n from django.db import models\r\n\r\n class Article(models.Model):\r\n name = models.CharField(max_length=200)\r\n language = models.CharField(max_length=200, choices=LANGUAGES)\r\n\r\n def __unicode__(self):\r\n return self.name\r\n\r\n class ArticleTable(tables.Table):\r\n class Meta:\r\n model = Article\r\n\r\n table = ArticleTable([Article(name='English article', language='en'),\r\n Article(name='Russian article', language='ru')])\r\n\r\n assert 'English' == table.rows[0]['language']\r\n assert 'Russian' == table.rows[1]['language']", "def __init__(\n self,\n parent_model: 'Any',\n model: 'Any',\n info: 'ResolveInfo',\n graphql_args: dict,\n ):\n super().__init__()\n self.info: 'ResolveInfo' = info\n self.graphql_args: dict = graphql_args\n\n self.model: 'Any' = model\n self.parent_model: 'Any' = parent_model\n self.parent_model_pks: 'Tuple[str, ...]' = self._get_model_pks(\n self.parent_model\n )\n self.parent_model_pk_fields: tuple = tuple(\n getattr(self.parent_model, pk) for pk in self.parent_model_pks\n )\n\n self.model_relation_field: str = to_snake_case(self.info.field_name)\n\n self.relation: 'Any' = getattr(\n self.parent_model, self.model_relation_field\n )", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)", "def handle_translation_registrations(*args, **kwargs):\n from modeltranslation.settings import ENABLE_REGISTRATIONS\n\n if not ENABLE_REGISTRATIONS:\n # If the user really wants to disable this, they can, possibly at their\n # own expense. 
This is generally only required in cases where other\n # apps generate import errors and requires extra work on the user's\n # part to make things work.\n return\n\n # Trigger autodiscover, causing any TranslationOption initialization\n # code to execute.\n autodiscover()", "def __init__(self, **options):\n self.__dict__.update(\n (k, v) for (k, v) in options.items() if not k.startswith('__'))", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context['lang_versions'] = self.get_translations()\n context['default_lang'] = (settings.LANGUAGES[0][0])\n return context", "def __init__(self, parent=None):\n super(LocalizePanel, self).__init__(parent)\n self.setObjectName('foundry.localization.localizationpanel')\n self.setWindowTitle('Localization Panel')\n settings_path = os.path.expanduser('~/.nuke/uistate.ini')\n self.settings = QtCore.QSettings(settings_path, QtCore.QSettings.IniFormat)\n self.settings.setFallbacksEnabled(False) \n self.setMinimumWidth(10)\n self.proxy_model = LocalizeProxyModel()\n self.model = LocalizeModel()\n self.proxy_model.setSourceModel(self.model)\n self.session_has_callbacks = None\n self.init_UI()\n self.__read_settings() # read uistate.ini\n self.__init_signal_helper()\n # Delay signal connection a little to ensure the Cache/Localization menu is fully loaded\n # so the panel can be connected to it's signals.\n QtCore.QTimer.singleShot(500, self.connect_signals)", "def __init__(self, **kwargs):\n super(Transform, self).__init__('transforms')\n\n # Import validators\n # -----------------\n from plotly.validators.heatmapgl import (transform as v_transform)\n\n # Initialize validators\n # ---------------------\n\n # Populate data dict with properties\n # ----------------------------------\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**kwargs)", "def update_attributes_map(klass):\n\n return {\n 'name': '',\n 'default_locale': ''\n }", "def set_description(self, text, lang=0):\n self.localized_strings[lang] = text", "def fix_metadata(po):\r\n\r\n # By default, django-admin.py makemessages creates this metadata:\r\n #\r\n # {u'PO-Revision-Date': u'YEAR-MO-DA HO:MI+ZONE',\r\n # u'Language': u'',\r\n # u'Content-Transfer-Encoding': u'8bit',\r\n # u'Project-Id-Version': u'PACKAGE VERSION',\r\n # u'Report-Msgid-Bugs-To': u'',\r\n # u'Last-Translator': u'FULL NAME <EMAIL@ADDRESS>',\r\n # u'Language-Team': u'LANGUAGE <LL@li.org>',\r\n # u'POT-Creation-Date': u'2013-04-25 14:14-0400',\r\n # u'Content-Type': u'text/plain; charset=UTF-8',\r\n # u'MIME-Version': u'1.0'}\r\n\r\n fixes = {\r\n 'PO-Revision-Date': datetime.utcnow(),\r\n 'Report-Msgid-Bugs-To': 'openedx-translation@googlegroups.com',\r\n 'Project-Id-Version': '0.1a',\r\n 'Language': 'en',\r\n 'Last-Translator': '',\r\n 'Language-Team': 'openedx-translation <openedx-translation@googlegroups.com>',\r\n }\r\n po.metadata.update(fixes)", "def __init__(self, *args):\n super().__init__('description', *args, required=True, hidden=False, limit=-1)", "def formfield(self, **kwargs):\n if self.plugin_class:\n self._choices = self.plugin_class.get_all_choices(field=self)\n return super(TemplateNameField, self).formfield(**kwargs)", "def __init__(self, obj, adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods)", "def __init__(self, model, parent=None, relation=None, reverse=None,\n related_name=None, accessor_name=None, nullable=False,\n depth=0):\n\n self.model = model\n\n self.app_name = model._meta.app_label\n 
self.model_name = model._meta.object_name\n self.db_table = model._meta.db_table\n self.pk_column = model._meta.pk.column\n\n self.parent = parent\n self.parent_model = parent and parent.model or None\n\n self.relation = relation\n self.reverse = reverse\n\n self.related_name = related_name\n self.accessor_name = accessor_name\n self.nullable = nullable\n self.depth = depth\n\n self.children = []", "def __new__(cls, class_name, bases, dict):\n opts = ModelReadOnlyTemplateOptions(dict.get('Meta', None))\n\n if opts.model is not None:\n model_fields = SortedDict()\n model_hidden_fields = {}\n for name, prop in sorted(opts.model.properties().iteritems(),\n key=lambda prop: prop[1].creation_counter):\n if opts.fields and name not in opts.fields:\n if opts.hidden_fields and name in opts.hidden_fields:\n model_hidden_fields[name] = prop\n continue\n if opts.exclude and name in opts.exclude:\n continue\n\n model_fields[name] = prop\n\n dict['fields'] = model_fields\n dict['hidden_fields'] = model_hidden_fields\n dict['renderers'] = opts.renderers or {}\n\n if opts.css_prefix:\n dict['css_prefix'] = opts.css_prefix\n\n return super(ModelReadOnlyTemplateMetaclass, cls).__new__(\n cls, class_name, bases, dict)", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = 
None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def with_description(self, description):\r\n self.description = description\r\n return self", "def __init__(self, *args, **kwargs):\n\n # Les lignes suivantes permettent de modifier les label d'un champ dans la page\n super(ModelForm, self).__init__(*args, **kwargs)\n self.fields[\"nom_de_l_evenement\"].label = \"Nom de l'évènement\"\n self.fields[\"date_de_l_evenement\"].label = \"Date de l'évènement\" # utiliser plutôt l'attribut label comme pour AbonnementEvenementForm\n self.fields[\"fichier\"].label = \"Photo(s)\"", "def __loadTranslator(self):\n if self.__ui is not None:\n loc = self.__ui.getLocale()\n if loc and loc != \"C\":\n locale_dir = os.path.join(\n os.path.dirname(__file__), \"ProjectDjango\", \"i18n\")\n translation = \"django_{0}\".format(loc)\n translator = QTranslator(None)\n loaded = translator.load(translation, locale_dir)\n if loaded:\n self.__translator = translator\n e5App().installTranslator(self.__translator)\n else:\n print(\"Warning: translation file '{0}' could not be\"\n \" loaded.\".format(translation))\n print(\"Using default.\")", "def __init__(self, use_spacy=True):\n self._use_spacy = use_spacy", "def setup(cls):\n super().setup()\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.tac_dialogues = cast(TacDialogues, cls._skill.skill_context.tac_dialogues)\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )", "def test_fallback_variant(self):\n x = SimpleModel()\n\n x.set_current_language(\"de\")\n x.tr_title = \"Hallo-de\"\n\n x.set_current_language(\"en\")\n x.tr_title = \"Hello-en\"\n\n x.save()\n\n with translation.override(\"de-ch\"):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"Hallo-de\")", "def visit_classdef(self, node: ClassDef) -> None:\n if node.name != \"TranslatableMeta\" or not isinstance(\n node.parent, ClassDef\n ):\n return\n base_classes = \"translations.models.Translatable\"\n if not node_is_subclass(node.parent, base_classes):\n self.add_message(\"W0001\", node=node.parent)", "def __get__(self, instance, owner):\r\n self.resource_meta = instance\r\n return self", "def setUpClass(cls):\n super(LanguageModelTests, cls).setUpClass()\n cls.lang_en, _ = Language.objects.get_or_create(language='en')\n cls.lang_fr, _ = Language.objects.get_or_create(language='fr')", "def __new__(mcs, class_name, bases, attrs):\n\n # Retrieve the Meta class, if present\n meta = attrs.get('Meta', None)\n conf = None\n\n # Try to retrieve the dynaconf property\n if meta:\n conf = getattr(meta, 'dynaconf', None)\n\n # When found, extend the form's attribute's with the specified ones\n if conf:\n for key, value in conf.iteritems():\n attrs[key] = value\n\n # Leave the rest to djangoforms.ModelFormMetaclass.\n return super(DynaFormMetaclass, mcs).__new__(mcs, class_name, bases, attrs)", "def meta(self) -> api.Meta:\n return self._get_model(model=api.Meta)", "def get_model(self) -> BaseLanguageModel:\n model = available_models[self.model_name.value]\n kwargs = model._lc_kwargs\n secrets = {secret: getattr(model, secret) for secret in model.lc_secrets.keys()}\n kwargs.update(secrets)\n\n model_kwargs = kwargs.get(\"model_kwargs\", {})\n for attr, value in self.dict().items():\n if attr == \"model_name\":\n # Skip model_name\n continue\n if hasattr(model, attr):\n # If 
the model has the attribute, add it to kwargs\n kwargs[attr] = value\n else:\n # Otherwise, add it to model_kwargs (necessary for chat models)\n model_kwargs[attr] = value\n kwargs[\"model_kwargs\"] = model_kwargs\n\n # Initialize a copy of the model using the config\n model = model.__class__(**kwargs)\n return model", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)" ]
[ "0.6093402", "0.5498365", "0.5405703", "0.5377071", "0.5377071", "0.52954173", "0.5262987", "0.5173471", "0.5130741", "0.51139444", "0.5081222", "0.5053549", "0.50521266", "0.5046423", "0.50335914", "0.50004405", "0.4973911", "0.4968481", "0.49118015", "0.49080524", "0.48669386", "0.48669386", "0.48619223", "0.48424605", "0.48359898", "0.48178667", "0.47981346", "0.47837883", "0.4783383", "0.47801346", "0.47797638", "0.47738564", "0.4751934", "0.4750174", "0.47406355", "0.47363192", "0.4724429", "0.47213688", "0.47185007", "0.4718464", "0.4708494", "0.47076958", "0.47076926", "0.47024706", "0.47005048", "0.46989152", "0.46871594", "0.46801656", "0.46733642", "0.46623772", "0.465878", "0.4657231", "0.46559697", "0.4655944", "0.46541926", "0.46537137", "0.46534958", "0.4649639", "0.46446058", "0.4623474", "0.4619702", "0.46190366", "0.4616324", "0.4605206", "0.4601017", "0.46002096", "0.45757017", "0.45749357", "0.45686346", "0.45601314", "0.45558563", "0.45558563", "0.45518345", "0.45510828", "0.45484397", "0.45334575", "0.45284018", "0.45266593", "0.45247617", "0.4524252", "0.45165688", "0.45140755", "0.45138597", "0.4513434", "0.451028", "0.45025223", "0.44917196", "0.44906384", "0.4490337", "0.44885483", "0.4487373", "0.44735897", "0.4470458", "0.446888", "0.44643712", "0.44596046", "0.44571456", "0.44544798", "0.44523734", "0.44491825" ]
0.73294145
0
When this instance is saved, also save the (cached) translation
def save_translations(cls, instance, **kwargs): opts = cls._meta if hasattr(instance, opts.translations_cache): trans = getattr(instance, opts.translations_cache) if not trans.master_id: trans.master = instance trans.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, *args, **kwargs):\n\n ret = super(Translation, self).save(*args, **kwargs)\n\n # Does cache invalidation\n cache_key = Translation.objects.make_cache_key(type(self), self.object_id, self.language)\n cache.set(cache_key, self)\n\n return ret", "def save(self, *args, **kwargs):\n\t\tconflicting_instance = Translation.objects.filter(\n\t\t\tterm=self.term,\n\t\t\tlanguage=self.language,\n\t\t\tvenue__isnull=True\n\t\t)\n\n\t\tif self.pk:\n\t\t\tconflicting_instance = conflicting_instance.exclude(pk=self.pk)\n\n\t\tif conflicting_instance.exists():\n\t\t\traise ValidationError({\n\t\t\t\t'error':\n\t\t\t\t\t'Generic translation for this term (%s) and language (%s) already exists.' % (\n\t\t\t\t\t\tself.term, self.language\n\t\t\t\t\t)\n\t\t\t})\n\n\t\tsuper(Translation, self).save(*args, **kwargs)", "def save(self, *args, **kwargs):\n if (\n # Does the public translation have a different title?\n self.__class__.objects.filter(\n master__draft_course_run__translations__pk=self.pk,\n language_code=self.language_code,\n )\n .exclude(title=self.title)\n .exists()\n ):\n self.master.direct_course.extended_object.title_set.filter(\n language=self.language_code\n ).update(\n publisher_state=PUBLISHER_STATE_DIRTY\n ) # mark page dirty\n\n return super().save(*args, **kwargs)", "def set_translation_to_cache ( self, text, src_lang, target_lang, translated_text ):\n self.app_cache.set_translation_to_cache ( text, src_lang, target_lang, translated_text )", "def save(self):\n # TODO (Pierre): code", "def set_name_translation(self):\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc(\"Translation\", current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"Translation\",\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or \"en\",\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value(\"Translation\", current.name, \"translated_text\", self.label)\n\t\t\tfrappe.translate.clear_cache()", "def translate_caching(self, translate_caching):\n\n self._translate_caching = translate_caching", "def save(self):\n\n pass", "def save(self, *args, **kwargs):\n return", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save (self):\n pass", "def get_translation(self):", "def test_translation_default_language_cache(self):\n pool = Pool()\n Config = pool.get('ir.configuration')\n Char = self.Char()\n\n with Transaction().set_context(language=Config.get_language()):\n char, = Char.create([{\n 'char': \"foo\",\n }])\n\n char.char = \"bar\"\n char.save()\n\n self.assertEqual(char.char, \"bar\")", "def translate_as(self, lang):\n trans = PublicationLocalization.objects.filter(publication=self,\n language=lang,\n is_active=True).first()\n if trans:\n self.title = trans.title\n self.subheading = trans.subheading\n self.content = trans.content", "def translate(self, language=None):", "def save(self, *args, **kwargs):\n self.key = str(self.key).upper()\n\n do_cache = kwargs.pop('cache', True)\n\n self.clean(**kwargs)\n self.validate_unique(**kwargs)\n\n # Update this setting in the cache\n if do_cache:\n self.save_to_cache()\n\n super().save()\n\n # Get after_save action\n setting = self.get_setting_definition(self.key, *args, **kwargs)\n 
after_save = setting.get('after_save', None)\n\n # Execute if callable\n if callable(after_save):\n after_save(self)", "def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)", "def _save_lang(self):\n for combobox, (option, _default) in list(self.comboboxes.items()):\n if option == 'interface_language':\n data = combobox.itemData(combobox.currentIndex())\n value = from_qvariant(data, to_text_string)\n break\n save_lang_conf(value)\n self.set_option('interface_language', value)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n self.update_generated_tags()", "def save(self, *args, **kwargs):\n pass", "def save(self, *args, **kwargs):\n\n self.html_text = htmlize(self.text, self.language)\n super(Snippet, self).save(*args, **kwargs)", "def save():\n pass", "def translate(self):\n\t\tself._translate(True)", "def translate(self):\n pass", "def save(self, cfg: AttrOrderedDict, field: Field):\n\t\tfor key in self.cfg_path[:-1]:\n\t\t\tcfg = cast(AttrOrderedDict, cfg.get(key))\n\t\tdata = field.data\n\t\tcfg[self.cfg_path[-1]] = self.translator.save(data)", "def get_set_translation_from_cache ( self, text, src_lang, target_lang ):\n translated_text = self.get_translation_from_cache ( text, src_lang, target_lang )\n if not translated_text:\n translated_text = self.service_creator.get_translation ( text, src_lang, target_lang )\n self.set_translation_to_cache ( text, src_lang, target_lang, translated_text )\n return translated_text", "def test_save_ignore_fallback_marker(self):\n x = SimpleModel()\n x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n x.set_current_language(self.other_lang2)\n # try fetching, causing an fallback marker\n x.safe_translation_getter(\"tr_title\", any_language=True)\n # Now save. 
This should not raise errors\n x.save()", "def save(self, *args, **kwargs):\n self._update_search_tokens()\n super().save(*args, **kwargs)", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save(self):\n from models import storage\n self.updated_at = datetime.datetime.now()\n storage.save()", "def save(self):\n lang = self.languageCombo.currentText()\n kwSet = self.setSpinBox.value()\n self.__keywords[lang][\"Sets\"][kwSet] = self.keywordsEdit.toPlainText()\n \n for lang, keywords in self.__keywords.items():\n Preferences.setEditorKeywords(lang, keywords[\"Sets\"])", "def save(self):\n from models import storage\n self.updated_at = datetime.now()\n storage.save()", "def save():", "def save(self):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError", "def save_now(self):\r\n self.save()", "def save_now(self):\r\n self.save()", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def __init__(self, language=None):\n self.language = language\n self.translations = {}", "def _save(self, **kwargs): #signal, sender, instance):\r\n tags = self._get_instance_tag_cache(kwargs['instance'])\r\n if tags is not None:\r\n Tag.objects.update_tags(kwargs['instance'], tags)", "def save(self):\r\n self.updated_at = datetime.now()\r\n models.storage.save()", "def save(self) -> None:\n self._save_marker = self._current", "def dico_save(self):\r\n\t\tdic = (self.__dict__).copy()\r\n\t\tdel dic['trparas']\r\n\t\tdic.update(self.trparas.dico_save())\r\n\t\treturn dic", "def translate(self, oracion: str) -> None:\n\n if self.saved_translator:\n result = self.saved_translator(oracion).numpy()\n print(f\"... English translation: {result}\\n\")\n else:\n print(\"INFO: Couldn't find a saved model. 
Train the translator first with the `train` command.\\n\")", "def test_save_multiple(self):\n x = SimpleModel()\n x.set_current_language(\"en\")\n x.tr_title = \"TITLE_EN\"\n x.set_current_language(\"fr\")\n x.tr_title = \"TITLE_FR\"\n x.set_current_language(\"es\")\n x.tr_title = \"TITLE_ES\"\n x.set_current_language(\"nl\")\n x.tr_title = \"TITLE_NL\"\n\n x.save()\n\n # Check if all translations are saved.\n self.assertEqual(\n sorted(x.translations.values_list(\"tr_title\", flat=True)),\n [\"TITLE_EN\", \"TITLE_ES\", \"TITLE_FR\", \"TITLE_NL\"],\n )\n self.assertEqual(sorted(x.get_available_languages()), [\"en\", \"es\", \"fr\", \"nl\"])\n self.assertTrue(x.has_translation(\"en\"))\n self.assertTrue(x.has_translation(\"es\"))\n self.assertFalse(x.has_translation(\"fi\"))\n\n # Update 2 translations.\n # Only those should be updated in the database.\n x.set_current_language(\"es\")\n x.tr_title = \"TITLE_ES2\"\n x.set_current_language(\"nl\")\n x.tr_title = \"TITLE_NL2\"\n\n self.assertNumQueries(2, x.save_translations())\n\n # Even unmodified language is automatically saved.\n x.set_current_language(\"it\", initialize=True)\n self.assertTrue(x.has_translation(\"it\")) # does return true for this object.\n self.assertNumQueries(1, lambda: x.save_translations())\n self.assertEqual(sorted(x.get_available_languages()), [\"en\", \"es\", \"fr\", \"it\", \"nl\"])", "def save(self, filename):\r\n options = conf.lib.clang_defaultSaveOptions(self)\r\n result = int(conf.lib.clang_saveTranslationUnit(self, filename,\r\n options))\r\n if result != 0:\r\n raise TranslationUnitSaveError(result,\r\n 'Error saving TranslationUnit.')", "def save(self) -> None:\n filename = \"users/\" + \"_\".join([self.name, self.lang, self.mode, self.time]) + '.json'\n\n state = {\n 'name': self.name,\n 'lang': self.lang,\n 'mode': self.mode,\n 'time': self.time,\n 'has_times': self.has_times,\n 'has_persons': self.has_persons,\n 'persons_translation': self.persons_translation,\n 'persons': self.persons,\n 'min_to_review': self.min_to_review,\n 'practice_list': self.practice_list,\n 'total_right': self.total_right,\n 'total_answers': self.total_answers\n }\n\n with open(filename, 'w') as file:\n json.dump(state, file, indent=2)", "def get_translation(self):\n return self.translation", "def translation(self):\n return self._translation", "def save(self, *args, **kwargs):\n if self.state: self.state.save()", "def save(self):\n self.backend.save(list(self._d.items()))\n log.debug(\"save: {}\".format(self.backend.filename))", "def save(self):\n\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.rpc.call(MsfRpcMethod.CoreSave)", "def record(self, translation, comments=None):\n self.translation = translation\n if comments:\n self.comments = comments", "def store_as_django_locale(locale, content):\n\n filepath = \"%s%s\" % (DJANGO_I18N_OUTPUT_PATH, f\"%s/LC_MESSAGES/django.po\" % locale)\n\n # If the language does not exist yet, make the folder supporting this language.\n os.makedirs(Path(filepath).parent, exist_ok=True)\n\n with open(filepath, 'w') as f:\n f.write(content.decode('UTF-8'))", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def set_translated_id(id, translated, lang):", "def save(self):\n raise NotImplementedError()", "def translate(self, to_lang: str = TARGET_LANG):\n if not self.language:\n 
self.detect_language()\n if not all([self.clean, self.language != to_lang]):\n return\n self.payload += '&source={}&target={}'.format(self.language, to_lang)\n resp = requests.request('POST', self.url_translation, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.translation = json.loads(resp.text)['data']['translations'][0]['translatedText']\n except KeyError:\n return", "def _save(self):\n self.logger.debug(\"Saving to persistence\")\n try:\n data = self.persistence_serialize()\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n # generate item to be persisted by gathering all variables\n # to be persisted into a dictionary\n data = {persisted_var: getattr(self, persisted_var)\n for persisted_var in self.persisted_values()}\n\n # save generated dictionary under block's id\n self._persistence.save(data, self.id())", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self, *args, **kwargs) -> None:\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)", "def translate():\n pass", "def save_data(self):\n pass", "def save(self, *args, **kwargs):\n self.slug = \"/\".join([\n slugify(__class__.__name__.lower()),\n settings.PK_PLACEHOLDER,\n slugify(self.name)\n ])\n super(__class__, self).save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.modify_ts = datetime.now()\n super(ModelBase, self).save(*args, **kwargs)", "def save(self):\n return None", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def retranslateUi(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n self.WindowSave.setWindowTitle(_translate(\"self.WindowSave\", \"WindowSave\"))\r\n self.label.setText(_translate(\"self.WindowSave\", self.textlabel))\r\n self.labelTxt.setText(_translate(\"self.WindowSave\", \" .txt \"))", "def test_fallback_variant(self):\n x = SimpleModel()\n\n x.set_current_language(\"de\")\n x.tr_title = \"Hallo-de\"\n\n x.set_current_language(\"en\")\n x.tr_title = \"Hello-en\"\n\n x.save()\n\n with translation.override(\"de-ch\"):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"Hallo-de\")", "def get_translation ( self ):\n self.verify_post_data ( )\n\n text = request.json[ 'text' ]\n src_lang = request.json[ 'source_lang' ]\n target_lang = request.json[ 'target_lang' ]\n\n # if translation is available in cache, just fetch it from there. 
Otherwise use translation service.\n translated_text = self.get_set_translation_from_cache ( text, src_lang, target_lang )\n\n return jsonify ( {\"Translation\": translated_text} )", "def save_plugin_data(self):\n return", "def saveText(self):\n self.rsubject.saveOnChanged(self.edCursor.getPos())\n\n # Allows saving after a certain number of delete operations:\n self.deleteCount = 0", "def save(self, request=None):\n return super().save()", "def save(self):\n self.updated_at = datetime.now()\n storage.save()", "def save(self):\n self.updated_at = datetime.now()\n storage.save()", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def translations_en_text(self, translations_en_text):\n\n self._translations_en_text = translations_en_text", "def transliteration(self, transliteration):\n\n self._transliteration = transliteration", "def save(self, obj):", "def save(self):\n self.updated_at = datetime.today()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.today()\n models.storage.save()", "def fetchTranslation(self, language):\n pass", "def on_transLanguageComboBox_currentIndexChanged(self, index):\n self.__plugin.setPreferences(\n \"TranslationLanguage\", self.transLanguageComboBox.itemData(index))", "def save(self, *args, **kwargs) -> Any:\n pass", "def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)" ]
[ "0.81048036", "0.6967799", "0.66896576", "0.64093536", "0.6083553", "0.60599184", "0.6016258", "0.59257364", "0.5915563", "0.59141093", "0.59141093", "0.59141093", "0.59141093", "0.59141093", "0.59109294", "0.589746", "0.5893489", "0.5889759", "0.58738244", "0.58508587", "0.5847226", "0.5837558", "0.5837558", "0.58365685", "0.58052343", "0.58034444", "0.5790452", "0.57768387", "0.57489663", "0.5728682", "0.5719944", "0.570667", "0.5705324", "0.5696091", "0.56943786", "0.56943786", "0.56943786", "0.56767285", "0.56765366", "0.5675127", "0.5672471", "0.5593029", "0.5593029", "0.5593029", "0.5588891", "0.5588891", "0.5587391", "0.5587391", "0.5587391", "0.55782205", "0.5558114", "0.55539554", "0.5542996", "0.5539082", "0.55385983", "0.5536355", "0.55214345", "0.55045015", "0.55026436", "0.54960114", "0.5495409", "0.54926926", "0.5484159", "0.5479354", "0.5476393", "0.54749745", "0.54617226", "0.5460888", "0.54552764", "0.5453315", "0.5450967", "0.5440359", "0.5440359", "0.5440359", "0.5419513", "0.53925204", "0.538393", "0.53796613", "0.5368331", "0.5366744", "0.536452", "0.5334344", "0.5330529", "0.53264135", "0.5319683", "0.53144294", "0.5301985", "0.53009045", "0.53009045", "0.5294775", "0.5294775", "0.52912486", "0.52862406", "0.5279061", "0.52775574", "0.52775574", "0.5276942", "0.5273409", "0.52661395", "0.5261907" ]
0.7327054
1
Add various entries to a cube dictionary and sort by socket index
def normalize(district, purpose, cube_dictionary_list):
    for gui_index, cube_dictionary in enumerate(cube_dictionary_list):
        cube_dictionary["district"] = district
        cube_dictionary["purpose"] = purpose
        cube_dictionary["gui_index"] = gui_index
        # Dictionary is given in a natural gui display order. Remember it.
    return sorted(cube_dictionary_list, key=lambda item: item["socket"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_vertex_to_clusters(clusters,vertex):\n for key in clusters:\n clusters[key].append(vertex)", "def make_neighbor_db(data):\n acted_with = {}\n for i, j, _ in data:\n # the setdefault method lets us avoid checking for ourselves whether an\n # actor is aclready in the dictionary.\n # see https://docs.python.org/3/library/stdtypes.html#dict.setdefault\n acted_with.setdefault(i, set()).add(j)\n acted_with.setdefault(j, set()).add(i)\n return acted_with", "def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index", "def __init__(self):\r\n self.vertices = col.defaultdict()\r\n self.edges = col.defaultdict(list)", "def rebuild_indexes(self):\n self.cards = sorted(self.name_to_card.values(), key=lambda card: card.name)\n self.card_sets = sorted(\n self.code_to_card_set.values(), key=lambda cset: cset.release_date\n )\n\n self.set_code_to_printings = collections.defaultdict(list)\n self.card_name_to_printings = collections.defaultdict(list)\n self.set_name_num_mv_to_printings = collections.defaultdict(list)\n\n for printing in self.id_to_printing.values():\n self.set_code_to_printings[printing.set_code].append(printing)\n self.card_name_to_printings[printing.card_name].append(printing)\n # snnm == (set, name, number, multiverseid)\n snnm_index_keys = {\n # pylint: disable=line-too-long\n (\n printing.set_code,\n printing.card_name,\n printing.set_number,\n printing.multiverseid,\n ),\n (printing.set_code, printing.card_name, None, printing.multiverseid),\n (printing.set_code, printing.card_name, printing.set_number, None),\n (printing.set_code, printing.card_name, None, None),\n }\n for key in snnm_index_keys:\n self.set_name_num_mv_to_printings[key].append(printing)\n\n for printings in self.set_code_to_printings.values():\n printings.sort(key=set_code_to_printings_key)\n\n for printings in self.card_name_to_printings.values():\n printings.sort(key=card_name_to_printing_key)\n\n # Build ordered indexes\n self.set_code_to_printing_to_row = {}\n for set_code, printings in self.set_code_to_printings.items():\n self.set_code_to_printing_to_row[set_code] = {\n printing: i for i, printing in enumerate(printings)\n }", "def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n", "def index_add(all_index, this_index, samples, caller):\n for key, record in this_index.iteritems():\n if key not in all_index:\n all_index[key] = {}\n for sample_id in samples:\n if sample_id not in all_index[key]:\n all_index[key][sample_id] = {caller: []}\n elif caller not in all_index[key][sample_id]:\n all_index[key][sample_id][caller] = []\n # NB: If caller was run twice, will have 2 records here\n all_index[key][sample_id][caller].append(record)", "def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the same number \n # of dimensions to be compared.\n if 
len(cube.coord(self.time_coord).points) == 1:\n cube = iris.util.new_axis(cube, scalar_coord=self.time_coord)\n \n # Chop cubes into individual realizations for relabelling.\n member_slices = get_coordinate_slice_dimensions(\n cube, [self.realization,self.forecast_ref_time],\n ignore_missing_coords=True)\n for member_slice in cube.slices(member_slices):\n \n if self.realization in [coord.name() \n for coord in member_slice.coords()]:\n member_slice.coord(\n self.realization).points = [realization_num]\n else:\n realization_coord = iris.coords.AuxCoord([realization_num],\n self.realization)\n member_slice.add_aux_coord(realization_coord)\n \n member_slice.cell_methods = None\n sorted_cubelist.append(member_slice)\n realization_num += 1\n \n sorted_cubelist = iris.cube.CubeList(sorted_cubelist)\n # Mask missing time steps so merging can be done.\n sorted_cubelist = pad_coords(sorted_cubelist, self.time_coord)\n cube = sorted_cubelist.merge_cube()\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n \n if cube.coord_dims(cube.coord(self.realization)) == \\\n cube.coord_dims(cube.coord(self.forecast_ref_time)):\n # Re order realizations in initialisation date order.\n ordered_inits = sorted(cube.coord('forecast_reference_time').points)\n ordered_mems = range(1, len(cube.coord('realization').points)+1)\n ordered_cubes = []\n for member_slice in cube.slices(member_slices):\n mem_index = ordered_inits.index(\n member_slice.coord(self.forecast_ref_time).points[0])\n member_slice.coord('realization').points = ordered_mems[mem_index]\n del ordered_inits[mem_index]\n del ordered_mems[mem_index]\n ordered_cubes.append(member_slice)\n cube = iris.cube.CubeList(ordered_cubes).merge_cube()\n \n return cube", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def build_inverted_index(msgs):\n # YOUR CODE HERE\n inverted_idx = dict()\n\n temp = dict()\n\n # msgs here is the item dict \n for item in msgs:\n temp[item['id']] = item\n\n for i in range(1,9046):\n if i in temp:\n item = temp[i]\n toks = tokenize(item['name']) + tokenize(item['better'])\n counts = Counter(toks)\n for word, value in counts.items():\n if word in inverted_idx.keys():\n inverted_idx[word].append((item['id'],value))\n else:\n inverted_idx[word] = [(item['id'], value)]\n\n return inverted_idx", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def add_vertex(self, vertex):\n self[vertex] = {}", "def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx", "def _sort_membind_info(membind_bind_info):\n membind_cpu_list = []\n nodes_count = int(max(element[2] for element in membind_bind_info)) + 1\n # Sort list by Node id\n for node_number in range(nodes_count):\n node_core_list = 
[]\n core_info = {}\n for entry in membind_bind_info:\n cpu_id = int(entry[0])\n core_id = int(entry[1])\n node_id = int(entry[2])\n # On a machine where there is no NUMA nodes, entry[3] could be empty, so set socket_id = -1\n if entry[3] != \"\":\n socket_id = int(entry[3])\n else:\n socket_id = -1\n\n # Skip nodes other than current node number\n if node_number != node_id:\n continue\n\n # Add core info\n if cpu_id == core_id:\n core_info.update({\n core_id: {\n \"cpu_id\": cpu_id,\n \"node_id\": node_id,\n \"socket_id\": socket_id,\n },\n })\n else:\n # Add information about Hyper Threading\n core_info[core_id][\"ht_cpu_id\"] = cpu_id\n\n # Change dict of dicts to list of dicts\n for iterator in range(len(core_info)):\n curr_core_id = len(core_info) * node_number + iterator\n single_core_info = core_info.get(curr_core_id)\n if single_core_info:\n node_core_list.append(single_core_info)\n\n membind_cpu_list.append(node_core_list)\n\n return membind_cpu_list", "def __init__(self):\n self.vertList = {}\n self.vertCount = 0", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = 
[datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n for cube in year_cubelist.merge():\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, \n self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n sorted_cubelist.append(cube)\n return iris.cube.CubeList(sorted_cubelist)", "def build_sample_map(flowcell):\n result = {}\n rows = [(lane, lib[\"name\"]) for lib in flowcell[\"libraries\"] for lane in lib[\"lanes\"]]\n i = 1\n for _, name in sorted(set(rows)):\n if name not in result:\n result[name] = \"S{}\".format(i)\n i += 1\n return result", "def _create_dnp3_object_map(self):\n\n feeders = self.file_dict.get(\"feeders\", [])\n measurements = list()\n capacitors = list()\n regulators = list()\n switches = list()\n solarpanels = list()\n batteries = list()\n fuses = list()\n breakers = list()\n reclosers = list()\n energyconsumers = list()\n for x in feeders:\n measurements = x.get(\"measurements\", [])\n capacitors = x.get(\"capacitors\", [])\n regulators = x.get(\"regulators\", [])\n switches = x.get(\"switches\", [])\n solarpanels = x.get(\"solarpanels\", [])\n batteries = x.get(\"batteries\", [])\n fuses = x.get(\"fuses\", [])\n breakers = x.get(\"breakers\", [])\n reclosers = x.get(\"reclosers\", [])\n energyconsumers = x.get(\"energyconsumers\", [])\n\n # Unique grouping of measurements - GroupBy Name, Type and Connectivity node\n groupByNameTypeConNode = defaultdict(list) \n for m in measurements:\n groupByNameTypeConNode[m['name']+m.get(\"measurementType\")+m.get(\"ConnectivityNode\")].append(m)\n\n # Create Net Phase DNP3 Points\n for grpM in groupByNameTypeConNode.values():\n\n if grpM[0]['MeasurementClass'] == \"Analog\" and grpM[0].get(\"measurementType\") == \"VA\":\n measurement_type = grpM[0].get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n \n\n name1 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VAR-value'\n name2 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-Watts-value'\n name3 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VA-value'\n\n description1 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VAR\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description2 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-Watts\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description3 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VA\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n # Create Each 
Phase DNP3 Points\n for m in measurements:\n attribute = attribute_map['regulators']['attribute']\n measurement_type = m.get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n name= m['name'] + '-' + m['phases']\n description = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + measurement_type + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name, description, measurement_type, measurement_id)\n self.c_ai += 1\n\n if m.get(\"measurementType\") == \"VA\":\n measurement_id = m.get(\"mRID\")\n name1 = m['name'] + '-' + m['phases'] + '-VAR-value'\n name2 = m['name'] + '-' + m['phases'] + '-Watts-value'\n name3 = m['name'] + '-' + m['phases'] + '-angle'\n\n description1 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"VAR\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description2 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"Watt\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description3 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"angle\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") + \",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n\n elif m['MeasurementClass'] == \"Discrete\" and measurement_type == \"Pos\":\n if \"RatioTapChanger\" in m['name'] or \"reg\" in m[\"SimObject\"]:\n # TODO: Do we need step?\n for r in range(5, 7): # [r==4]: Step, [r==5]: LineDropR, [r==6]:LineDropX \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, attribute[r])\n self.c_ao += 1\n else:\n self.assign_val_a(\"DI\", 1, 2, self.c_di, name, description, measurement_type, measurement_id)\n self.c_di += 1\n\n for m in capacitors:\n measurement_id = m.get(\"mRID\")\n cap_attribute = attribute_map['capacitors']['attribute'] # type: List[str]\n\n for l in range(0, 4):\n # publishing attribute value for capacitors as Bianry/Analog Input points based on phase attribute\n name = m['name']\n description = \"Name:\" + m['name'] + \"ConductingEquipment_type:LinearShuntCompensator\" + \",Attribute:\" + cap_attribute[l] + \",Phase:\" + m['phases']\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, cap_attribute[l])\n self.c_ao += 1\n for p in range(0, len(m['phases'])):\n name = m['name'] + m['phases'][p]\n description = \"Name:\" + m['name'] + \",ConductingEquipment_type:LinearShuntCompensator\" + \",controlAttribute:\" + cap_attribute[p] + \",Phase:\" + m['phases'][p]\n # description = \"Capacitor, \" + m['name'] + \",\" + \"phase -\" + m['phases'][p] + \", and attribute is - \" + cap_attribute[4]\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, cap_attribute[4])\n self.c_do += 1\n\n for m in regulators:\n reg_attribute = attribute_map['regulators']['attribute']\n # bank_phase = list(m['bankPhases'])\n for n in range(0, 4):\n measurement_id = m.get(\"mRID\")\n name = 
m['bankName'] + '-' + m['bankPhases']\n description = \"Name:\" + m['bankName'] + \",ConductingEquipment_type:RatioTapChanger_Reg\" +\",Phase:\" + m['bankPhases'] + \",Attribute:\" + reg_attribute[n]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id[0], reg_attribute[n])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id[0], reg_attribute[n])\n self.c_ai += 1\n for i in range(5, 7):\n for j in range(0, len(m['bankPhases'])):\n measurement_id = m.get(\"mRID\")[j]\n name = m['tankName'][j] + '-' + m['bankPhases'][j]\n description = \"Name:\" + m['tankName'][j] + \",ConductingEquipment_type:RatioTapChanger_Reg\"+ \",Phase:\" + m['bankPhases'][j] + \",controlAttribute:\" + reg_attribute[i]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id,reg_attribute[i])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id,reg_attribute[i])\n self.c_ai += 1\n \n for m in solarpanels:\n for k in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-Watts-value'\n description = \"Solarpanel:\" + m['name'] + \",Phase:\" + m['phases'] + \",measurementID:\" + measurement_id\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n \n name1 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name2 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name2, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name3 = \"Solar\"+ m['name'] + '-' + m['phases'][k] + '-Watts-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name3, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n\t\t\t\n for m in batteries:\n for l in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = m['name'] + '-' + m['phases'][l] + '-Watts-value'\n description = \"Battery, \" + m['name'][l] + \",Phase: \" + m['phases'] + \",ConductingEquipment_type:PowerElectronicConnections\"\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description,measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n name1 = m['name'] + '-' + m['phases'][l] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description,measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n for m in switches:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][k]\n description = \"Name:\" + m[\"name\"] + \",ConductingEquipment_type:LoadBreakSwitch\" + \"Phase:\" + phase_value[k] +\",controlAttribute:\"+switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in fuses:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for l in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][l]\n description = \"Name:\" + m[\"name\"] + \",Phase:\" + phase_value[l] + \",Attribute:\" + switch_attribute + \",mRID\" + measurement_id\n 
self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in breakers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for n in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][n]\n description = \"Name: \" + m[\"name\"] + \",Phase:\" + phase_value[n] + \",ConductingEquipment_type:Breaker\" + \",controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n \n for m in reclosers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for i in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][i]\n description = \"Recloser, \" + m[\"name\"] + \"Phase: - \" + phase_value[i] + \",ConductingEquipment_type:Recloser\"+\"controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in energyconsumers:\n measurement_id = m.get(\"mRID\")\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name']+\"phase:\" + m['phases'][k]\n description = \"EnergyConsumer, \" + m[\"name\"] + \"Phase: \" + phase_value[k] \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"EnergyConsumer.p\")\n self.c_ao += 1\n \n name1 = m['name']+\"phase:\" + m['phases'][k] + \"control\"\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name1, description, measurement_id, \"EnergyConsumer.p\")\n self.c_do += 1\n\n return self.out_json", "def _write_time_cube(self, cube, key_list):\n data = cube.data[:]\n coords = cube.coord('time')[:]\n for t in range(0, data.shape[0]):\n value = round_variable(self.input_data.get_value(\n InputType.VARIABLE)[0], data[t])\n with iris.FUTURE.context(cell_datetime_objects=True):\n time_str = coords[t].cell(\n 0).point.strftime('%Y-%m-%d')\n try:\n self.data_dict[time_str].append(value)\n except KeyError:\n key_list.append(time_str)\n self.data_dict[time_str] = [value]", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n sorted_cubelist.append(self._sort_cubelist(year_cubelist))\n return iris.cube.CubeList(sorted_cubelist)", "def define_cube_slice(grid_input: list) -> dict:\n pad = (20 - len(grid_input[0])) // 2\n blank_grid = [[\".\"] * 20] * 20\n grid_output = [[\".\"] * 20] * 6\n for line in grid_input:\n line = [\".\"] * pad + line.split() + [\".\"] * pad\n if len(line) % 2 == 1:\n line.append(\".\")\n grid_output.append(line)\n grid_output += [[\".\"] * 20] * 6\n big_cube = {}\n for i in range(0, 21):\n big_cube[i] = blank_grid\n big_cube[10] = grid_output\n return big_cube", "def select_cubes(dic):\r\n\tname=[]\r\n\tobj_name=create()\r\n\tfor number in dic:\r\n\t\tfor j in range(dic[number]):\r\n\t\t\tnbElt=len(obj_name[number])\r\n\t\t\tname.append(\"cubes\"+chr(47)+str(number)+\"_\"+obj_name[number][np.random.randint(0,nbElt)])\r\n\treturn name", "def _addCounterToMap(probeMap, counter, index):\n if counter.probe in probeMap:\n probeMap[counter.probe].append(index)\n else:\n probeMap.update({counter.probe : [index]})", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n 
.astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def plays_to_edges(self, plays):\n return {self.ind_dict[i] for i in plays}", "def index_records(vr):\n return collections.OrderedDict((record2key(rec), clean_sample_index(rec))\n for rec in vr)", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def __init__(self):\n self.vert_dict = {}\n # self.vert_dict = []\n self.num_vertices = 0", "def clients_histogram_aggregates(**kwargs):\n attributes_list = [\n \"sample_id\",\n \"client_id\",\n \"ping_type\",\n \"os\",\n \"app_version\",\n \"app_build_id\",\n \"channel\",\n ]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]\n return dict(\n attributes_list=attributes_list,\n attributes=\",\".join(attributes_list),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n metric_attributes=\"\"\"\n metric,\n metric_type,\n key,\n agg_type\n \"\"\",\n **kwargs,\n )", "def __init__(self, no_vertices=0):\r\n self.__neighbours = {}\r\n self.__cost = {}\r\n for i in range(no_vertices):\r\n self.__neighbours[i] = []", "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))", "def create_community_dict(partition, graph):\n it = 0\n communities = dict()\n for part in partition:\n for vertex in part:\n communities[graph.vs[vertex]['name']] = it\n it += 1\n return communities", "def readMembers(self):\n f = open('%s/raw_clumpmembers_%s' %(self.wd,self.file), 'rb')\n #Skip first and last entries from this array.\n data = np.fromfile(f, dtype='i')[1:-1]\n self.nclumps = max(data)\n members = {}\n # I think we don't want ID==0 as this refers to no clump (CHECK...)\n for clump in range(self.nclumps):\n membershold = np.argwhere(data==clump+1).flatten()\n members[clump] = membershold\n self.members = members", "def add_vertex(self, v):\n self[v] = {}", "def add_vertex(self, v):\n self[v] = {}", "def stat_analsysis(map):\n data_stat = OrderedDict()\n for key in map:\n data_dict = {}\n for i in range(len(map[key])):\n data = map[key][i]\n if data not in data_dict:\n #a list which stores the position of each data point\n data_dict[data]=[i]\n else:\n data_dict[data].append(i)\n data_stat[key] = data_dict\n return data_stat", "def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. 
If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")", "def add_building_output_locations(self,dictionary, start,end,step): \n \"\"\"\n Given a dictionary of building footprints and associated nodes,element and sides, add the values \n to the netcdf grid file.\n \n The nodes, elements and sides associated with each footprint correspond to the there index in the RiCOM grid file\n \n Dictionary format:\n {id1: {'nodes': [n1, n2,...nn] }, {'elements': [e1,e2,...,en] },{'sides': [s1,s2,...,sn]}, id2: {}, id3 {}, ...., idn {} } \n \n idn = the id of the building footprint that the node, elements and sides belong to\n \n \"\"\"\n \n if (dictionary != {}):\n maxNodes = 0\n maxElements = 0\n maxSides = 0\n nodesAll = []\n elementsAll = []\n sidesAll = []\n id = []\n perimeter = []\n type = []\n for row in dictionary.iteritems(): \n id.append(row[0]) \n n = row[1]['nodes'] \n e = row[1]['elements']\n s = row[1]['sides']\n perimeter.append(row[1]['perimeter'])\n \n if row[1]['type'] == \"BUILDINGS_AS_HOLES\":\n typeNUM = 1\n elif row[1]['type'] == \"BUILDINGS_GRIDDED\":\n typeNUM = 2\n\n elif row[1]['type'] == \"BUILDINGS_AS_POINTS\":\n typeNUM = 3\n else:\n typeNUM = 0\n type.append(typeNUM)\n \n nodesAll.extend(n)\n elementsAll.extend(e)\n sidesAll.extend(s)\n if maxNodes < len(n): maxNodes = len(n)\n if maxElements < len(e): maxElements = len(e)\n if maxSides < len(s): maxSides = len(s)\n \n \n #remove repeated elements, sides and nodes\n nodesAll = list(set(nodesAll))\n elementsAll = list(set(elementsAll))\n sidesAll = list(set(sidesAll))\n \n print \"# elements = %s\" % len(elementsAll)\n print \"# sides = %s\" % len(sidesAll)\n print \"# nodes = %s\" % len(nodesAll)\n\n \n #initialise arrays for entry into netcdf file\n nodes = zeros((len(dictionary),maxNodes))\n elements = zeros((len(dictionary),maxElements))\n sides = zeros((len(dictionary),maxSides)) \n \n i = 0\n for row in dictionary.iteritems(): \n nodes[i,0:(len(row[1]['nodes']))] = row[1]['nodes']\n elements[i,0:(len(row[1]['elements']))] = row[1]['elements']\n sides[i,0:(len(row[1]['sides']))] = row[1]['sides']\n i+=1 \n \n #create dimensions\n try: self.buildings.createDimension('max_number_nodes',maxNodes)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_elements',maxElements)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_sides',maxSides)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('number_of_buildings',len(dictionary))\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',len(nodesAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: 
self.building_elements.createDimension('number_of_elements',len(elementsAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',len(sidesAll))\n except Exception, e: print \"WARNING: %s\" % e\n \n \n #create variables\n try: building_id = self.buildings.createVariable(varname = 'building_id',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_id = self.buildings.variables['building_id']\n print \"WARNING: %s\" % e\n \n try: building_wkt = self.buildings.createVariable(varname = 'building_wkt',datatype = str, dimensions=('number_of_buildings',)) \n except Exception, e:\n building_wkt = self.buildings.variables['building_wkt'] \n print \"WARNING: %s\" % e\n\n try: building_perimeter = self.buildings.createVariable(varname = 'building_perimeter',datatype = 'd', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_perimeter = self.buildings.variables['building_perimeter'] \n print \"WARNING: %s\" % e\n\n\n try: building_type = self.buildings.createVariable(varname = 'building_type',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_type = self.buildings.variables['building_type'] \n print \"WARNING: %s\" % e\n\n try: building_nodes = self.buildings.createVariable(varname = 'building_nodes',datatype = 'i', dimensions=('number_of_buildings','max_number_nodes',)) \n except Exception, e:\n building_nodes = self.buildings.variables['building_nodes'] \n print \"WARNING: %s\" % e\n \n try: building_elements = self.buildings.createVariable(varname = 'building_elements',datatype = 'i', dimensions=('number_of_buildings','max_number_elements',)) \n except Exception, e:\n building_elements = self.buildings.variables['building_elements']\n print \"WARNING: %s\" % e\n \n try: building_sides = self.buildings.createVariable(varname = 'building_sides',datatype = 'i', dimensions=('number_of_buildings','max_number_sides',)) \n except Exception, e:\n building_sides = self.buildings.variables['building_sides']\n print \"WARNING: %s\" % e\n \n building_nodes[:] = nodes\n building_elements[:] = elements\n building_sides[:] = sides\n building_id[:] = array(id) \n building_perimeter[:] = array(perimeter)\n building_type[:] = array(type)\n #Set the attributes\n self.building_nodes.start = start\n self.building_nodes.finish = end\n self.building_nodes.step = step\n self.building_elements.start = start\n self.building_elements.finish = end\n self.building_elements.step = step\n self.building_sides.start = start\n self.building_sides.finish = end\n self.building_sides.step = step\n \n #assign the data\n output_ids = {'nodes': [], 'elements': [], 'sides': []}\n try: output_ids['nodes'] = self.building_nodes.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_nodes',))\n except Exception, e:\n output_ids['nodes'] = self.building_nodes.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['elements'] = self.building_elements.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_elements',))\n except Exception, e:\n output_ids['elements'] = self.building_elements.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['sides'] = self.building_sides.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_sides',))\n except Exception, e:\n output_ids['sides'] = self.building_sides.variables['id']\n print \"WARNING: %s\" % e\n \n \n output_ids['nodes'][:] = array(nodesAll)\n output_ids['elements'][:] = 
array(elementsAll)\n output_ids['sides'][:] = array(sidesAll)\n \n \n self.buildingsAdded = True\n else:\n #create dimensions\n try: self.buildings.createDimension('number_of_buildings',0)\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',0)\n except Exception, e: print \"WARNING: %s\" % e \n self.buildingsAdded = True", "def __init__(self):\r\n self.indices = {}\r\n self.data = []\r\n self.len = 0", "def _pack_items(self):\n identifiers = tuple(self.identify_items(self))\n cache_keys = self.make_cache_keys(identifiers)\n cache_items = dict(izip(cache_keys, self))\n self.cache.set_many(cache_items, self.cache_timeout)\n return identifiers", "def initialize_connected_data(frame_data):\n connected_data = {}\n for i in range(0, len(frame_data)):\n this_spot_data = add_time_nuc(frame_data[i], 0, nucmask)\n connected_data[i+1] = np.array([this_spot_data])\n return connected_data", "def categorize_data(data, top_count):\n sorted_by_tcp = sorted(\n data, key=lambda x: x['TCP Utilization'], reverse=True\n )[0:top_count]\n sorted_by_udp = sorted(\n data, key=lambda x: x['UDP Utilization'], reverse=True\n )[0:top_count]\n\n print(f\"\\nTOP-{top_count} port flooders by TCP\")\n print(tabulate(sorted_by_tcp, headers='keys', tablefmt=\"psql\"))\n print(f\"\\nTOP-{top_count} port flooders by UDP\")\n print(tabulate(sorted_by_udp, headers='keys', tablefmt=\"psql\"))", "def calc_positions(zpoints, dsq_list):\n\n pos_map = {}\n points_map = {}\n\n for z, p in zpoints.items():\n if z in dsq_list:\n p = -1\n if p not in points_map:\n points_map[p] = set()\n points_map[p].add(z)\n\n i = 1\n for p in sorted(list(points_map.keys()), reverse = True):\n pos_map[i] = points_map[p]\n i += len(points_map[p])\n\n return pos_map", "def ranking(availability_info,mapds):\n rank=Counter(dict())\n for key in availability_info.keys():\n rank[mapds[key]]=len(availability_info[key])\n #print rank\n return rank", "def create_data_ia(map_size, enemy_id, ia_id):\n data_ia = {'player1': {},\n 'player2': {},\n 'main_turn': 1,\n 'attack_turn': 0,\n 'map_size': map_size,\n 'enemy_id': enemy_id,\n 'ia_id': ia_id}\n\n\n order_unit = {}\n order_unit['if_left'] = [(2,3), (3,2), (1,3), (2,2), (3,1), (1,2), (2,1), (1,1)]\n order_unit['if_right'] = [(map_size -1, map_size -2), (map_size -2, map_size -1), (map_size, map_size -2), (map_size -1, map_size -1), (map_size -1, map_size -1), (map_size -2, map_size), (map_size, map_size-1), (map_size -1, map_size), (map_size, map_size)]\n\n for i in range(2):\n for line in range(1, 4):\n for column in range(1, 4):\n unit = 'E'\n life = 4\n\n if line >= 2 and column >= 2:\n unit = 'D'\n life = 10\n\n if line + column != 6:\n x_pos = abs(i * map_size - line + i)\n y_pos = abs(i * map_size - column + i)\n\n if i == 0:\n unit_id = (order_unit['if_left'].index((x_pos,y_pos))) + 1\n data_ia['player1'][(x_pos, y_pos)] = [unit, life, unit_id]\n else:\n unit_id = (order_unit['if_right'].index((x_pos,y_pos))) + 1\n data_ia['player2'][(x_pos, y_pos)] = [unit, life, unit_id]\n\n return data_ia", "def addVertex(self,x):\n self.dictOut[x]=[]", "def __init__(self):\n ## self.clusters[cluster] = list of coordinates\n self.clusters = {}\n ## self.centroids[cluster] = centroid\n self.centroids = {}", "def 
initializeCollection():\n return {SENSOR1:[], SENSOR2:[], SENSOR3:[],SENSOR4:[], DATE:[]}", "def load_canonical(self, **kwargs) -> dict:\n conn = None\n if not isinstance(self.connector_contract, ConnectorContract):\n raise ValueError(\"The Connector Contract is not valid\")\n # this supports redis hmap only...\n cc_params = self.connector_contract.kwargs\n cc_params.update(kwargs) # Update with any passed though the call\n\n match = cc_params.get('match', '*')\n count = cc_params.get('count', 1000)\n keys = cc_params.get('keys')\n if not keys or len(keys) == 0:\n raise ValueError(\"RedisConnector requires an array of 'keys'\")\n try:\n conn = self.redis.from_url(self.connector_contract.uri)\n rtn_dict = {'id': []}\n for rowkey in conn.scan_iter(match, count):\n \"\"\"\n {\n \"col1\" = [1,2,3],\n \"col2\" = [\"a\",\"b\",\"c\"]\n }\n \"\"\"\n rowdata = conn.hgetall(rowkey)\n rtn_dict.get('id').append(rowkey.decode())\n for colkey in keys:\n if colkey not in rtn_dict:\n rtn_dict[colkey] = []\n raw_col_val = rowdata.get(str.encode(colkey))\n if raw_col_val:\n col_val = raw_col_val.decode()\n else:\n col_val = None\n rtn_dict.get(colkey).append(col_val)\n conn.close()\n return rtn_dict\n except Exception as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def get_ground_truth_dict(vertices, graph, log):\n vertices = index_vertices(vertices, graph)\n vertices.sort()\n cluster = 0\n ground_truth = dict()\n for vertex in vertices:\n v_name = graph.vs[vertex]['name']\n for trace in log:\n for event in trace:\n if v_name == event['concept:name']:\n cluster = event['truth']\n ground_truth[v_name] = cluster\n return ground_truth", "def create_cell_map(dim):\n for cell, faces in cell_face_map.iteritems():\n \n for face in faces:\n nds = face_list[face - 1][1]\n \n if not cell in cell_map:\n cell_map[cell] = copy(nds)\n \n else:\n cell_map[cell] = list(Set(cell_map[cell] + nds))", "def __init__(self):\n self.data = []\n self.idx = {}", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def cube_array(self):\n cube_sides = {}\n\n for side in SIDES:\n cube_sides[side] = []\n \n # Todo Break this loop into helper functions for clarity and simplicity\n for coord in COORDS_3:\n for cubie in self.cubies:\n # Making sure that the cubes cubies are processed in the correct order\n if np.array_equal(cubie.coordinates, coord): \n \n \n for side in SIDES:\n if cubie.in_side(side):\n for face in cubie.faces:\n \n # Checking that the face of the cubie has the same norm as the side we are processing\n if np.array_equal(face.norm, NORMS[side]):\n cube_sides[side].append(face.colour)\n\n new_list = [cube_sides[\"U\"], cube_sides[\"F\"], reversal(cube_sides[\"R\"]), reversal(cube_sides[\"B\"]),\n cube_sides[\"L\"], 
reversal(cube_sides[\"D\"])]\n \n final_list = [nine_to_3x3(side) for side in new_list]\n return final_list", "def add_vertices(self, vertices: Iterable[\"Vertex\"]) -> Sequence[int]:\n indices = []\n precision = self.precision\n for vertex in vertices:\n vertex = Vec3(vertex)\n key = vertex.round(precision) # type: ignore\n try:\n index, count = self.ledger[key]\n except KeyError: # new key\n index = len(self.vertices)\n self.vertices.append(vertex)\n self.ledger[key] = (index, 1)\n else: # update key entry\n # calculate new average location\n average = (self.vertices[index] * count) + vertex\n count += 1\n # update vertex location\n self.vertices[index] = average / count\n # update ledger\n self.ledger[key] = (index, count)\n indices.append(index)\n return tuple(indices)", "def sort_on_cluster_size( self ):\n cluster_size = []\n extents = {}\n eci_names = {}\n eci_values = {}\n\n for key,value in self.eci.items():\n size = int(key[1])\n extent = cluster_dia_from_name(key)\n if ( size in eci_names.keys() ):\n eci_names[size].append(key)\n eci_values[size].append(value)\n extents[size].append(extent)\n else:\n eci_names[size] = [key]\n eci_values[size] = [value]\n extents[size] = [extent]\n\n # Within each size sort on absolute value\n for key,value in eci_values.items():\n indx_srt = np.argsort(np.abs(extents[key]))\n new_list = [value[indx] for indx in indx_srt]\n eci_values[key] = new_list\n new_list = [eci_names[key][indx] for indx in indx_srt]\n eci_names[key] = new_list\n return eci_names, eci_values", "def anchors_to_adjacency(set_path, n_proteomes, mailbox_reader):\n frame_list = []\n for idx in range(n_proteomes):\n with mailbox_reader(idx) as file_handle:\n frame_list.append(\n pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n )\n nodes = pd.concat(\n frame_list,\n ignore_index=True,\n )\n del frame_list\n graph = nx.Graph()\n for unused_tuple, subframe in nodes.groupby(\n by=[\"syn.anchor.id\", \"syn.anchor.sub_id\"]\n ):\n ids = subframe[\"member_ids\"]\n n_ids = len(ids)\n graph.add_nodes_from(ids)\n if n_ids > 1:\n edges = combinations(ids, 2)\n graph.add_edges_from(edges, weight=n_ids)\n outpath = set_path / ANCHORS_FILE\n summarypath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_summary.tsv\"\n )\n histpath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_hist.tsv\"\n )\n components = [\n c\n for c in sorted(nx.connected_components(graph), key=len, reverse=True)\n if len(c) > 1\n ]\n fh = outpath.open(\"w\")\n fh.write(\"idx\\tcluster_id\\tsize\\tmembers\\n\")\n n_items = 0\n count_list = []\n hash_list = []\n id_list = []\n for i, comp in enumerate(components):\n component = np.sort(pd.Index(list(comp)).to_numpy())\n id_list.append(i)\n size = len(comp)\n count_list.append(size)\n hash_list.append(hash_array(component))\n for node in component:\n fh.write(f\"{n_items}\\t{i}\\t{size}\\t{node}\\n\")\n n_items += 1\n fh.close()\n n_clusts = len(count_list)\n del graph, components\n cluster_counts = pd.DataFrame({\"size\": count_list})\n largest_cluster = cluster_counts[\"size\"].max()\n cluster_hist = (\n pd.DataFrame(cluster_counts.value_counts()).sort_index().reset_index()\n )\n cluster_hist = cluster_hist.set_index(\"size\")\n cluster_hist = cluster_hist.rename(columns={0: \"n\"})\n cluster_hist[\"item_pct\"] = (\n cluster_hist[\"n\"] * cluster_hist.index * 100.0 / n_items\n )\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n cluster_hist[\"cluster_pct\"] = cluster_hist[\"n\"] * 100.0 / 
n_clusts\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n clusters = pd.DataFrame(\n {\"anchor.id\": id_list, \"count\": count_list, \"hash\": hash_list}\n )\n clusters.to_csv(summarypath, sep=\"\\t\")\n stats_dict = {\n \"in_anchor\": n_items,\n \"syn.anchors.n\": n_clusts,\n \"syn.anchors.largest\": largest_cluster,\n }\n return stats_dict", "def __init__(self):\n self.vert_list = {}\n self.num_vertices = 0", "def dict_collate(data):\n\n # Assuming there's at least one instance in the batch\n add_data_keys = data[0].keys()\n collected_data = {k: [] for k in add_data_keys}\n\n for i in range(len(list(data))):\n for k in add_data_keys:\n collected_data[k].append(data[i][k])\n\n for k in add_data_keys:\n collected_data[k] = torch.cat(collected_data[k], 0)\n\n # Passing redundant information for compatibility\n return collected_data, collected_data[\"target\"]", "def easier_indexing(Gs):\n keys = [Gs[i][1][:-5] for i in range(len(Gs))]\n topic_dict = dict(zip(keys, list(range(len(keys)))))\n return topic_dict", "def ResortPeers(self):\n \n self.sortedPeerList = []\n append = self.sortedPeerList.append\n for i in self.peerDatabase.keys():\n append((self.peerDatabase[i].RemainingRemoteStorage(), i))\n self.sortedPeerList.sort()\n self.sortedPeerList.reverse()", "def process_cluster(self, cluster):\n if self._ignore_cluster(cluster):\n return\n\n self.cluster_lists[self.file_index].append(cluster)\n spectra = list(cluster.get_spectra())\n\n def mixed_order( spec ): # for sorting\n return (spec.get_title())\n\n spectra.sort(key = mixed_order) \n self.sorted_spectra_dict[cluster.id] = spectra", "def IpTrafficAnalysis(client):\n\tindex = \"netflow*\"\n\tbucket1 = \"src_addr\"\n\tbucket2 = \"dst_addr\"\n\t\n\t#aggregate ipv4 flows\n\tbucket1DocValue = \"netflow.ipv4_src_addr\"\n\tbucket2DocValue = \"netflow.ipv4_dst_addr\"\t\n\tqDict = QueryBuilder().BuildDoubleAggregateQuery(bucket1, bucket2, bucket1DocValue, bucket2DocValue, level1BucketType=\"terms\", level2BucketType=\"terms\", level1DocValueType=\"field\", level2DocValueType=\"field\", size=0)\n\tjsonBucket = client.aggregate(index, qDict)\n\taggDict_Ipv4 = jsonBucket[\"aggregations\"]\n\t#aggregate ipv6 flows\n\tbucket1DocValue = \"netflow.ipv6_src_addr\"\n\tbucket2DocValue = \"netflow.ipv6_dst_addr\"\n\tqDict = QueryBuilder().BuildDoubleAggregateQuery(bucket1, bucket2, bucket1DocValue, bucket2DocValue, level1BucketType=\"terms\", level2BucketType=\"terms\", level1DocValueType=\"field\", level2DocValueType=\"field\", size=0)\n\tjsonBucket = client.aggregate(index, qDict)\n\taggDict_Ipv6 = jsonBucket[\"aggregations\"]\n\t#aggregate the ipv4/6 dictionaries together\n\taggDict = aggDict_Ipv4\n\taggDict[bucket1][\"buckets\"] += aggDict_Ipv6[bucket1][\"buckets\"]\n\t\n\tlabelVertices=True\n\tlabelEdges=False\n\t#aggDict = {u'src_addr': {u'buckets': [{u'dst_addr': {u'buckets': [{u'key': u'192.168.1.160', u'doc_count': 1061347}, {u'key': u'192.168.1.11', u'doc_count': 14857}, {u'key': u'192.168.0.12', u'doc_count': 14852}, {u'key': u'192.168.1.102', u'doc_count': 13044}, {u'key': u'239.255.255.250', u'doc_count': 7607}, {u'key': u'192.168.0.11', u'doc_count': 7382}, {u'key': u'192.168.0.91', u'doc_count': 5283}, {u'key': u'192.168.3.216', u'doc_count': 1730}, {u'key': u'192.168.0.1', u'doc_count': 625}, {u'key': u'192.168.1.118', u'doc_count': 257}], u'sum_other_doc_count': 544, u'doc_count_error_upper_bound': 1}, u'key': u'192.168.2.10', u'doc_count': 1127528}, {u'dst_addr': {u'buckets': [{u'key': u'192.168.2.10', 
u'doc_count': 1061347}, {u'key': u'239.255.255.250', u'doc_count': 14710}, {u'key': u'192.168.0.14', u'doc_count': 605}, {u'key': u'255.255.255.255', u'doc_count': 315}, {u'key': u'224.0.0.1', u'doc_count': 312}, {u'key': u'224.0.0.252', u'doc_count': 264}, {u'key': u'224.0.0.251', u'doc_count': 9}, {u'key': u'224.0.1.129', u'doc_count': 2}, {u'key': u'239.192.152.143', u'doc_count': 2}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.160', u'doc_count': 1077566}, {u'dst_addr': {u'buckets': [{u'key': u'192.168.0.1', u'doc_count': 104641}, {u'key': u'239.255.255.250', u'doc_count': 81122}, {u'key': u'224.0.0.252', u'doc_count': 24754}, {u'key': u'172.217.3.163', u'doc_count': 20530}, {u'key': u'172.217.3.174', u'doc_count': 19105}, {u'key': u'134.121.120.167', u'doc_count': 16311}, {u'key': u'192.168.3.255', u'doc_count': 8152}, {u'key': u'64.4.54.254', u'doc_count': 7700}, {u'key': u'64.71.168.217', u'doc_count': 7127}, {u'key': u'192.168.1.114', u'doc_count': 6920}], u'sum_other_doc_count': 187585, u'doc_count_error_upper_bound': 1754}, u'key': u'192.168.0.14', u'doc_count': 483947}, {u'dst_addr': {u'buckets': [{u'key': u'192.168.0.14', u'doc_count': 120591}, {u'key': u'255.255.255.255', u'doc_count': 2397}, {u'key': u'239.255.255.250', u'doc_count': 508}, {u'key': u'192.168.2.10', u'doc_count': 247}, {u'key': u'192.168.3.224', u'doc_count': 79}, {u'key': u'224.0.0.1', u'doc_count': 63}, {u'key': u'224.0.0.252', u'doc_count': 14}, {u'key': u'192.168.0.109', u'doc_count': 10}, {u'key': u'192.168.0.111', u'doc_count': 4}, {u'key': u'192.168.0.16', u'doc_count': 4}], u'sum_other_doc_count': 7, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.1', u'doc_count': 123924}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 87186}, {u'key': u'192.168.2.10', u'doc_count': 21272}, {u'key': u'192.168.3.255', u'doc_count': 8093}, {u'key': u'255.255.255.255', u'doc_count': 2206}, {u'key': u'192.168.0.14', u'doc_count': 78}, {u'key': u'224.0.0.252', u'doc_count': 2}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.12', u'doc_count': 118837}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 69383}, {u'key': u'192.168.3.255', u'doc_count': 11231}, {u'key': u'192.168.0.14', u'doc_count': 200}, {u'key': u'192.168.2.10', u'doc_count': 64}, {u'key': u'224.0.0.252', u'doc_count': 35}, {u'key': u'255.255.255.255', u'doc_count': 4}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.13', u'doc_count': 80917}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 37482}, {u'key': u'192.168.2.10', u'doc_count': 18645}, {u'key': u'192.168.15.255', u'doc_count': 7153}, {u'key': u'192.168.3.255', u'doc_count': 6852}, {u'key': u'255.255.255.255', u'doc_count': 3385}, {u'key': u'192.168.0.14', u'doc_count': 107}, {u'key': u'224.0.0.251', u'doc_count': 28}, {u'key': u'224.0.0.252', u'doc_count': 10}, {u'key': u'192.168.1.111', u'doc_count': 5}, {u'key': u'224.0.1.129', u'doc_count': 1}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.102', u'doc_count': 73668}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 32847}, {u'key': u'192.168.2.10', u'doc_count': 21241}, {u'key': u'192.168.3.255', u'doc_count': 12561}, {u'key': u'255.255.255.255', u'doc_count': 3511}, {u'key': u'192.168.0.14', u'doc_count': 355}, {u'key': u'192.168.2.101', u'doc_count': 9}, {u'key': u'192.168.2.102', u'doc_count': 9}, 
{u'key': u'192.168.2.103', u'doc_count': 9}, {u'key': u'192.168.2.107', u'doc_count': 8}, {u'key': u'192.168.2.108', u'doc_count': 8}], u'sum_other_doc_count': 35, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.11', u'doc_count': 70593}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 48167}, {u'key': u'192.168.1.255', u'doc_count': 7814}, {u'key': u'255.255.255.255', u'doc_count': 2350}, {u'key': u'224.0.0.252', u'doc_count': 80}, {u'key': u'192.168.3.255', u'doc_count': 3}, {u'key': u'224.0.0.251', u'doc_count': 3}, {u'key': u'192.168.0.14', u'doc_count': 1}, {u'key': u'192.168.1.101', u'doc_count': 1}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.14', u'doc_count': 58419}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 31456}, {u'key': u'255.255.255.255', u'doc_count': 8959}, {u'key': u'192.168.3.255', u'doc_count': 7454}, {u'key': u'192.168.2.10', u'doc_count': 7387}, {u'key': u'192.168.0.14', u'doc_count': 187}, {u'key': u'224.0.0.252', u'doc_count': 4}, {u'key': u'192.168.0.16', u'doc_count': 3}, {u'key': u'192.168.2.101', u'doc_count': 1}, {u'key': u'192.168.2.102', u'doc_count': 1}, {u'key': u'192.168.2.103', u'doc_count': 1}], u'sum_other_doc_count': 6, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.11', u'doc_count': 55459}], u'sum_other_doc_count': 410259, u'doc_count_error_upper_bound': 4257}}\n\tg = AggToNetworkGraph(aggDict, bucket1, bucket2, labelVertices, labelEdges)\n\tg.write_graphml(\"./ip_traffic.graphml\")\n\tgraphPlot = PlotNetworkGraph(g, labelVertices, labelEdges)\n\tgraphPlot.save(\"ipTraffic.png\")\n\tadjacencyMatrix = g.get_adjacency(attribute=\"weight\", default=0)\n\tprint(str(type(adjacencyMatrix))+\"\\n\"+str(adjacencyMatrix))\n\t\n\tPlotDirectedEdgeHistogram(g, \"weight\")", "def _hash(self) -> None:\r\n # for a unit cube there are 8 possible hashes\r\n # returns the tuple of with all 8 hashes\r\n\r\n self.hashes[\"aaa\"] = P[P[P[self.xi] + self.yi] + self.zi]\r\n self.hashes[\"aab\"] = P[P[P[self.xi] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"aba\"] = P[P[P[self.xi] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"abb\"] = P[P[P[self.xi] + self._inc(self.yi)] + self._inc(self.zi)]\r\n self.hashes[\"baa\"] = P[P[P[self._inc(self.xi)] + self.yi] + self.zi]\r\n self.hashes[\"bab\"] = P[P[P[self._inc(self.xi)] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"bba\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"bbb\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self._inc(self.zi)]", "def main(host='10.84.109.148', port=8086):\n user = \"\"\n password = \"\"\n dbname = \"ruuvi1\"\n dbuser = \"\"\n dbuser_password = \"\"\n client = InfluxDBClient(host, port, user, password, dbname)\n for mac in roovi_macs:\n query = \"select last(humidity),temperature, time from ruuvi_measurements where mac = \"+\"\\'\"+mac+\"\\'\" #filter temp, humidity data across all ruuvitags\n result = client.query(query)\n cpu_points = list(result.get_points(measurement='ruuvi_measurements')) #convert datatype to list\n for points in cpu_points:\n val= getfwi(points.get('time'),points.get('last'),points.get('temperature'))\n new_dic[roovi_locs.get(mac)]=val #store FWI index acc to location\n print(\" \\n\\n\\tLocation |\\t FWI\")\n pprint (new_dic)", "def load_history_entries(self, *entries):\n # Simplified version:\n for entry in entries:\n try:\n self[entry.url.host] += [entry]\n except KeyError:\n self[entry.url.host] = [entry]\n \n 
\n temp_dict = {entry.url.host: [] for entry in entries} \n for entry in entries:\n temp_dict[entry.url.host] += [entry]\n\n # Update the dictionary\n # self.update(temp_dict) # Will override any lists with the same host name\n for host, entry in temp_dict.items():\n #try:\n self[host] += [entry]\n #except IndexError:\n #self[host] = [entry]", "def indexes(self):\n return {'status': self._status_sort, 'rms': self._rms_sort}", "def __init__(self):\n\n self._dict = OrderedDict(zip(const.BFHCOLS, [0] * 111))", "def add_vertex(self, room):\r\n if room['room_id'] not in self.rooms:\r\n self.rooms[room['room_id']] = room\r\n # self.rooms[room['room_id']]['exits'] = {\r\n # d: '?' for d in room['exits']}\r", "def create_dict(info):\n \"\"\"\n dict = {ip: {counter:*}, {weekdays: []}, {hours: []}}\n \"\"\"\n dict_info = dict()\n for i in info:\n ip = i[0]\n hours = i[1]\n weekdays = i[2]\n if ip not in dict_info:\n dict_info[ip] = {}\n dict_info[ip]['counter'] = 0\n dict_info[ip]['hours'] = []\n dict_info[ip]['weekdays'] = []\n dict_info[ip]['counter'] += 1\n dict_info[ip]['hours'].append(hours)\n dict_info[ip]['weekdays'].append(weekdays)\n return dict_info", "def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def run(self):\n lsh, minhashes = self._new_lsh_index()\n total_num_events = len(minhashes)\n for key, minhash in minhashes.items():\n event_id, event_type, index_name = key\n score = self._calculate_score(lsh, minhash, total_num_events)\n self._update_event(event_id, event_type, index_name, score)\n\n return dict(\n index=self._config.index,\n data_type=self._config.data_type,\n num_events_processed=total_num_events\n )", "def new(num_buckets=256):\n aMap=[]", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif 
match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def combine_index(vcf_fnames):\n data_pile = collections.defaultdict(list)\n all_idx = collections.OrderedDict()\n all_samples = set()\n vcf_to_samples={}\n for vcf_fname in vcf_fnames:\n vr = vcf.Reader(filename=vcf_fname)\n caller = guess_caller(vr)\n samples = clean_samples(vr)\n vr.samples = samples\n idx = index_records(vr)\n print(vcf_fname, len(idx), caller, *samples, sep='\\t', file=sys.stderr)\n vcf_to_samples[vcf_fname] = samples\n data_pile[caller].append((idx, samples))\n all_samples.update(samples)\n for idx, samples, caller in cleanse_callers(data_pile):\n index_add(all_idx, idx, samples, caller)\n return sorted(all_idx.iteritems(), key=sortkey), sorted(all_samples), vcf_to_samples", "def make_sorted_profile_dict():\n user_indices = np.unique(plays.indices)\n user_indices = np.sort(user_indices, axis=-1, kind='quicksort', order=None)\n profiles = {}\n with tqdm.tqdm(total=len(user_indices)) as progress:\n for u_i in user_indices:\n \"\"\" results in: dict of { u_i : list[a_i] } sorted plays view \"\"\"\n p = tuple(zip([a for a in plays.getcol(u_i).tocoo().row], plays.getcol(u_i).tocoo().data))\n p = sorted(p, key=operator.itemgetter(1), reverse=True)\n list1, list2 = zip(*p)\n\n \"\"\" results in: dict of { u_i : tupe[(artist_name, plays), ..., n] } sorted plays view \"\"\"\n profiles[str(u_i)] = list(map(int, list1))\n \n progress.update(1)\n return profiles", "def __loadIndex( self ):\n\n assert self.mCreateMode == False, \"asked to read from database opened for writing\"\n\n if self.mMethod == \"uncompressed\":\n self.mDatabaseFile = open( self.mDbname, \"r\" )\n elif self.mMethod == \"dictzip\":\n import dictzip\n self.mDatabaseFile = dictzip.GzipFile( self.mNameDb)\n elif self.mMethod == \"lzo\":\n import lzo\n self.mDatabaseFile = Uncompressor( self.mNameDb, lzo.decompress )\n elif self.mMethod == \"gzip\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, gzip_demangler )\n elif self.mMethod == \"zlib\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, zlib.decompress )\n elif eslf.mMethod == \"bz2\":\n self.mDatabaseFile = bz2.BZ2File( self.mNameDb )\n elif self.mMethod == \"debug\":\n self.mDatabaseFile = Uncompressor( self.mDbname + \".debug\", lambda x: x ) \n\n self.mIndex = {}\n\n for line in open(self.mNameIndex, \"r\"):\n\n if line.startswith(\"#\"): continue\n data = line[:-1].split(\"\\t\")\n\n # index with random access points\n if len(data) > 4:\n (identifier, pos_id, block_size, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n points = map(int, data[3:-1])\n self.mIndex[int(identifier)] = (pos_id, block_size, lsequence, points)\n else:\n (identifier, pos_id, pos_seq, lsequence) = bytes(data[0]), 
int(data[1]), int(data[2]), int(data[-1])\n self.mIndex[int(identifier)] = (pos_id, pos_seq, lsequence) \n \n self.mIsLoaded = True", "def index_nodes(self):\n out = {}\n\n #avg = np.mean(list(self.rtype_vectors.values()),axis=0)\n\n\n #for name, node in self.nodes.items():\n # tmp1 = [self.rtype_vectors[rtype]\n # for rtype, dest in node.outgoing_relations] or [NULL_VEC()]\n # tmp2 = [permute_rtype_vector(self.rtype_vectors[rtype])\n # for rtype, prev in node.incoming_relations] or [NULL_VEC()]\n\n # net = tmp1 + tmp2\n\n # #out[name] = np.asarray(net).mean(axis=0)\n # #out[name] = np.asarray(net).sum(axis=0)\n # v = np.asarray(net).sum(axis=0)\n # if v.any():\n # out[name] = v/max(v)#softmax(v/max(v))\n # else:\n # out[name] = v\n\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n ####normalize everything\n #for r,v in out.items():\n # if v.any():\n # #out[r] = v / sqrt(v.dot(v))\n # out[r] = softmax((v-avg)/maxm)\n\n\n\n # PCA method 0001701\n rmap = self.rtype_vectors\n data = np.zeros((len(self.nodes), JACCARD_DIMENSIONS), dtype=np.float)\n ix = 0\n for node in self.nodes.values():\n\n #compute weighted average of each relation type\n tmp = [rmap[rtype] for \n rtype, dest in node.outgoing_relations] + \\\n [permute_rtype_vector(rmap[rtype]) for \n rtype, prev in node.incoming_relations]\n\n v = np.asarray(tmp).mean(axis=0) if tmp else NULL_VEC()\n\n #normalize\n if v.any():\n data[ix] = v / sqrt(v.dot(v))\n else:\n data[ix] = v\n ix += 1\n\n #eliminate projection onto first 7 principal components\n d2 = data - PCA(data, 7)\n\n #order of nodes is preserved\n for i,v in enumerate(self.nodes):\n out[v] = softmax(d2[i])\n\n return out", "def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders", "def fix_metadata(self, cubes):\n cube = self.get_cube_from_list(cubes)\n lat_coord = cube.coord('latitude', dimensions=(1, ))\n lon_coord = cube.coord('longitude', dimensions=(2, ))\n lat_coord.standard_name = None\n lat_coord.long_name = 'grid_latitude'\n lat_coord.var_name = 'i'\n lat_coord.units = '1'\n lon_coord.standard_name = None\n lon_coord.long_name = 'grid_longitude'\n lon_coord.var_name = 'j'\n lon_coord.units = '1'\n lon_coord.circular = False\n return cubes", "def fix_metadata(self, cubes):\n cube = self.get_cube_from_list(cubes)\n lat_coord = cube.coord('latitude', dimensions=(1, ))\n lon_coord = cube.coord('longitude', dimensions=(2, ))\n lat_coord.standard_name = None\n lat_coord.long_name = 'grid_latitude'\n lat_coord.var_name = 'i'\n lat_coord.units = '1'\n lon_coord.standard_name = None\n lon_coord.long_name = 'grid_longitude'\n lon_coord.var_name = 'j'\n lon_coord.units = '1'\n lon_coord.circular = False\n return cubes", "def _rebuildStructs(self) -> None:\n # Rebuild the other reverse lookup\n newDict = defaultdict(list)\n\n for vortexUuid, gridKeys in 
self._observedGridKeysByVortexUuid.items():\n for gridKey in gridKeys:\n newDict[gridKey].append(vortexUuid)\n\n keysChanged = set(self._observedVortexUuidsByGridKey) != set(newDict)\n\n self._observedVortexUuidsByGridKey = newDict\n\n # Notify the server that this client service is watching different grids.\n if keysChanged:\n d = ClientGridLoaderRpc.updateClientWatchedGrids(\n clientId=self._clientId,\n gridKeys=list(self._observedVortexUuidsByGridKey)\n )\n d.addErrback(vortexLogFailure, logger, consumeError=False)", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def add_to_index(index, keyword, url):\n for entry in index:\n if entry[0] == keyword:\n for link in entry[1]:\n if link[0] == url:\n return\n entry[1].append([url, 0])\n return\n # not found, add new keyword to index\n index.append([keyword, [[url, 0]]])", "def __init__(self):\n self.hashmap = [[] for _ in range(self._cap)]", "def htable(nbuckets):", "def socket_cache_set(self, context,data):\r\n global data_cache\r\n s_id = self.get_socket_hash(self)\r\n s_ng = self.id_data.name\r\n if s_ng not in data_cache:\r\n data_cache[s_ng] = {}\r\n data_cache[s_ng][s_id] = data\r\n output_nodes = set()\r\n if self.is_linked and self.is_output:\r\n for node_output_link in self.links:\r\n output_nodes.add(node_output_link.to_node)\r\n for node in output_nodes:\r\n node.execute_node(context)", "def compress_coordinate(elements: list) -> dict:\n\n # See:\n # https://atcoder.jp/contests/abc036/submissions/5707999?lang=ja\n compressed_list = sorted(set(elements))\n return {element: index for index, element in enumerate(compressed_list)}", "def __init__(self):\n array = db.getDataFromString(\"execute [dbo].[ranking] '1/1/2016', 365\")\n self.ranking = {}\n for i in array:\n if i[0] not in self.ranking:\n self.ranking[i[0]] = {}\n self.ranking[i[0]][i[1]] = i[2]", "def add_vertex(self, key):\n # increment the number of vertices\n self.num_vertices += 1\n # create a new vertex\n vertex = Vertex(key)\n # add the new vertex to the vertex dictionary with a list as the value\n # self.vert_dict[vertex] = []\n # add the new vertex to the vertex list\n self.vert_dict[key] = vertex\n # return the new vertex\n return vertex", "def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)", "def __init__(self):\n self.items = []\n self.indexes: Dict[int, Set] = defaultdict(set)", "def mg_index_events(cache):\n\n eid = None\n while True:\n event = yield eid\n eid = event['id']\n for name, func in INDEX_FUNCTION.items():\n key = func(event)\n if key is not None:\n index = cache.setdefault(name, {})\n index.setdefault(key, {})[eid] = event", "def items(self):\n ix_obj = list(self.d_buffer.keys())\n ix_obj.sort()\n l_obj = [self.d_buffer[ix] for ix in ix_obj]\n\n return ix_obj, l_obj", "def set_keys(self):\n self.inventory_dict['csah'] = {'hosts': '{}'.format(socket.getfqdn()), 'vars': {}}" ]
[ "0.52169806", "0.5209674", "0.5192503", "0.5059949", "0.50562185", "0.50340647", "0.503215", "0.49723428", "0.4956823", "0.4896037", "0.48916155", "0.48439723", "0.48249584", "0.48159158", "0.48155957", "0.47965276", "0.4795622", "0.47880176", "0.47797826", "0.47521746", "0.47491118", "0.47464606", "0.47442117", "0.47317487", "0.47051093", "0.46982402", "0.46964827", "0.46813753", "0.4680806", "0.46805185", "0.4672306", "0.46694344", "0.46624216", "0.46588516", "0.46558893", "0.46498817", "0.46477398", "0.46477398", "0.46167636", "0.46146464", "0.46100235", "0.46050683", "0.46014085", "0.46003452", "0.45958796", "0.4593172", "0.45833182", "0.45695913", "0.4564381", "0.45638397", "0.45636323", "0.45592904", "0.45510766", "0.45505255", "0.45450795", "0.45348078", "0.45290384", "0.4526577", "0.45224756", "0.45224205", "0.45185438", "0.4511217", "0.45106816", "0.44974524", "0.44957685", "0.4493024", "0.44868279", "0.4486746", "0.4479412", "0.44746315", "0.44729957", "0.4464557", "0.4463695", "0.4463633", "0.44589445", "0.4457899", "0.44577122", "0.44573474", "0.44572884", "0.4454909", "0.4454792", "0.44546676", "0.4448223", "0.4446259", "0.44447652", "0.44447652", "0.44432437", "0.44431365", "0.44424242", "0.4440771", "0.4435618", "0.4435303", "0.44301692", "0.44233647", "0.44232646", "0.44187608", "0.44160095", "0.44155672", "0.44148442", "0.44141147" ]
0.58312446
0
Performs the right division 'dst / src', and moves 'result_in' to 'dst' to either retrieve the division or the modulo
def perform_divide(c, dst, src, result_in8, result_in16): # We're likely going to need to save some temporary variables # No 'push' or 'pop' are used because 'ah /= 3', for instance, destroys 'al' # 'al' itself can be pushed to the stack, but a temporary variable can hold tmps = TmpVariables(c) # In division, the dividend is divided by the divisor to get a quotient # dividend \ divisor # quotient large_divide = max(dst.size, src.size) > 8 if large_divide: # 16-bits mode, so we use 'ax' as the dividend dividend = 'ax' # The second factor cannot be an inmediate value # Neither AX/DX or variants, because those are used # Neither 8 bits, because 16 bits mode is required if src.code[0] in 'ad' or src.size != 16 or src.value is not None: # Either we use a register, which we would need to save/restore, # or we use directly a memory variable, which is just easier divisor = tmps.create_tmp('divisor', size=16) else: divisor = src.code # If the destination is DH or DL, we need to save the opposite part # If the destination is not DX, we need to save the whole register if dst[0] == 'd': if dst[-1] == 'h': tmps.save('dl') elif dst[-1] == 'l': tmps.save('dh') elif dst[-1] != 'x': tmps.save('dx') else: tmps.save('dx') # If the destination is AH or AL, we need to save the opposite part # If the destination is not AX, we need to save the whole register if dst[0] == 'a': if dst[-1] == 'h': tmps.save('al') elif dst[-1] == 'l': tmps.save('ah') elif dst[-1] != 'x': tmps.save('ax') else: tmps.save('ax') # Load the dividend and divisor into their correct location helperassign(c, [dividend, divisor], [dst, src]) # Perform the division c.add_code([ f'xor dx, dx', f'div {divisor}' ]) # Move the result from wherever it is helperassign(c, dst, result_in16) else: # 8-bits mode, so we use 'al' as the dividend dividend = 'al' # The second factor cannot be an inmediate value # Neither AX/AH because those are used # Neither 16 bits, because 8 bits mode is required if src.code[0] in 'a' or src.size != 8 or src.value is not None: # Either we use a register, which we would need to save/restore, # or we use directly a memory variable, which is just easier divisor = tmps.create_tmp('divisor', size=8) else: divisor = src.code # If the destination is AH or AL, we need to save the opposite part # If the destination is not AX, we need to save the whole register if dst[0] == 'a': if dst[-1] == 'h': tmps.save('al') elif dst[-1] == 'l': tmps.save('ah') elif dst[-1] != 'x': tmps.save('ax') else: tmps.save('ax') # Load the dividend and divisor into their correct location helperassign(c, [dividend, divisor], [dst, src]) # Perform the division c.add_code([ f'xor ah, ah', f'div {divisor}' ]) # Move the result from wherever it is helperassign(c, dst, result_in8) # Restore the used registers tmps.restore_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def int_div_inplace(a, b):", "def true_div_inplace(a, b):", "def divup(a, b):\n return (a + b - 1) // b", "def divide(lhs, rhs):\n return _make.divide(lhs, rhs)", "def div(self, source, destination):\n value = bytearray()\n\n dividend = destination\n divider = source\n\n if is_single_scalar_reg(divider):\n value.extend([0xF3, 0x0F, 0x5E]) # divss\n mod = 0b11\n rm = get_register_encoding(divider)\n reg = get_register_encoding(dividend)\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n elif is_double_scalar_reg(divider):\n value.extend([0xF2, 0x0F, 0x5E]) # divsd\n mod = 0b11\n rm = get_register_encoding(divider)\n reg = get_register_encoding(dividend)\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n else:\n # idiv eax = edx:eax / divider\n if divider == ProcessorRegister.accumulator:\n tmp_reg = ProcessorRegister.data\n value += self.copy_from_reg_to_reg(destination=divider,\n source=tmp_reg)\n divider = tmp_reg\n # so dividend is no accumulator\n tmp_reg = ProcessorRegister.accumulator\n value += self.copy_from_reg_to_reg(destination=dividend,\n source=tmp_reg)\n\n tmp_reg = ProcessorRegister.counter\n value += self.copy_from_reg_to_reg(destination=divider,\n source=tmp_reg)\n divider = tmp_reg\n\n src = ProcessorRegister.accumulator\n value += self.copy_from_reg_to_reg(destination=dividend,\n source=src)\n\n # mov eax -> edx\n src = ProcessorRegister.accumulator\n dest = ProcessorRegister.data\n value += self.copy_from_reg_to_reg(destination=dest,\n source=src)\n\n # shift edx by 31 -> contains the highest bits of the dividend,\n # eax the lowest 31 bits\n value += self.shift(ProcessorRegister.data,\n ShiftMode.right_arithmetic,\n amount=31)\n\n value.append(0xf7) # idiv\n\n mod = 0b11\n rm = get_register_encoding(divider)\n reg = 7 # F7 /7 -> 7 in the reg field\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n\n # the result is stored in the acc register, so copy it to the\n # correct result register if needed\n if destination != ProcessorRegister.accumulator:\n register = ProcessorRegister.accumulator\n value += self.copy_from_reg_to_reg(register, dividend)\n\n return value", "def division_algo(a, b):\n return a / b, a % b", "def modulo(x, y) :\n if (x / y) < 1:\n return x\n else:\n return modulo(x - y, y)", "def math_map(val, src, dst):\n return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]", "def div(x, y):\n return x / y", "def division(x, y):\n return x / y", "def __div__(relpath):", "def division(a, b):\n if b != 0:\n return a//b", "def __floordiv__(self, other):\r\n return self.__divmod__(other)[0]", "def div(a,b):\r\n return a/b", "def remainder(left_object, right_object):\n result = left_object % right_object\n if left_object < 0 and result > 0 or left_object > 0 and result < 0:\n result = result - right_object\n return result", "def __floordiv__(self, other: 'SInt') -> 'SInt':\r\n return self.__divmod__(other)[0]", "def divide(self, other):\n return self.multiply(other.reciprocal())", "def scale(val, src, dst):\n try:\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]\n except ZeroDivisionError:\n return 0.0", "def divide_images(divident : Image, divisor : Image, destination : Image = None) -> Image:\n\n\n parameters = {\n \"src\":divident,\n \"src1\":divisor,\n \"dst\":destination\n }\n\n execute(__file__, '../clij-opencl-kernels/kernels/divide_images_' + str(len(destination.shape)) + 'd_x.cl', 'divide_images_' + str(len(destination.shape)) + 'd', 
destination.shape, parameters)\n return destination", "def divide(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def __rdiv__(self, other):\n return self.__rtruediv__(other)", "def divide(x, y):\n\n return x / y", "def _reduce(self) -> None:\n divisor = self._gcd(self._numerator, self._denominator)\n self._numerator = self._numerator // divisor\n self._denominator = self._denominator // divisor", "def division(a, b):\n return (a // b, a / b)", "def dividir(value, arg):\n return int(value) /int(arg)", "def divide(a, b):\n return a / b", "def div(self, a, b):\n return divmod(a, b)", "def __divmod__(self, other):\r\n other = self._coerce(other)\r\n if other is NotImplemented:\r\n return NotImplemented\r\n\r\n r = runtime.mod(self, other)\r\n q = (self - r) * runtime.reciprocal(other)\r\n return q * 2**self.frac_length, r", "def div(self, a, b):\n return (a / b, a % b)", "def divide(x, y):\n assert y != 0\n if x == 1: return 0, 1\n q, r = divide(x >> 1, y)\n q *= 2\n r *= 2\n if x & 1: r += 1\n if r >= y:\n q += 1\n r -= y\n return q, r", "def __div__(self, other):\n return self.__mul__(1 / other)", "def __div__(self, other):\n return self.__mul__(1 / other)", "def ceildiv(a, b):\n return - (-a // b)", "def pgcd(a, b):\n while a % b != 0:\n a, b = b, a % b\n return b", "def division(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a / b", "def div_mod_p(self, a, b):\n a = a % self.p\n b = b % self.p\n return a * self.pow_mod_p(b, self.p - 2, self.p) % self.p", "def floor_divide(lhs, rhs):\n return _make.floor_divide(lhs, rhs)", "def floor_div(a, b):\r\n # see decorator for function body\r", "def __rdiv__(self, number):\n return self.__div__(number)", "def rem(a, b):\n return a % b", "def _divide_and_round(a, b):\n # Based on the reference implementation for divmod_near\n # in Objects/longobject.c.\n q, r = divmod(a, b)\n # round up if either r / b > 0.5, or r / b == 0.5 and q is odd.\n # The expression r / b > 0.5 is equivalent to 2 * r > b if b is\n # positive, 2 * r < b if b negative.\n r *= 2\n greater_than_half = r > b if b > 0 else r < b\n if greater_than_half or r == b and q % 2 == 1:\n q += 1\n\n return q", "def divide(x, y):\n return round(x / y)", "def scale(val, src, dst):\r\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def __div__(self,that):\n return self.__opExpand2(that, np.divide)", "def the_remainder_of_the_division(numb1, numb2):\r\n return f\"Your result: {numb1%numb2}\"", "def Div(a, b):\n\tRequire(b > 0)\n\tc = a / b\n\treturn c", "def __floordiv__(self, other):\r\n return NotImplemented", "def __floordiv__(self, other):\n return MyCustomNumber(self.value // other.value)", "def div(a, b):\n a = float(a)\n b = float(b)\n return a / b", "def mod(a: Decimal, b: Decimal) -> Decimal:\n return a % b", "def divide_exact(n,d):\n return floordiv(n,d),mod(n,d)", "def trunc_divide(lhs, rhs):\n return _make.trunc_divide(lhs, rhs)", "def divide(self, a, b):\n return a / b", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def __divmod__(self, other: 'SInt') -> 'SInt':\r\n if type(self) != type(other):\r\n raise TypeError(\"Wrong type or length for other\")\r\n\r\n size = max(self.nbBytes, other.nbBytes)\r\n Divid, Divis = abs(self).cast(size), abs(other).cast(size)\r\n Quotient = SInt(size)\r\n one = 
SInt(size)\r\n one.binaire = '0' * (size * 8 - 1) + '1'\r\n Quotient.binaire = '0' * 8 * size\r\n while Divis < Divid or Divis == Divid:\r\n Quotient += one\r\n Divid -= Divis\r\n # Here, the remain is the dividende\r\n Remainer = Divid\r\n if self.signe != other.signe: # Problems occur only with different signs\r\n if Remainer.valeur() == 0: # When abs(a) % abs(b) == 0, there is specific instructions\r\n Quotient = -Quotient\r\n else:\r\n Quotient = -(Quotient + one)\r\n # ---------------------------------------------------------------------------------\r\n Remainer = ((self.cast(2 * size) - (Quotient * other)) << size * 8).cast(size)\r\n # ---------------------------------------------------------------------------------\r\n if self.signe == other.signe == '1':\r\n Remainer = - Remainer\r\n return Quotient, Remainer", "def __rdivmod__(self, other):\r\n return NotImplemented", "def __rdivmod__(self, other):\r\n return NotImplemented", "def the_division_is_aimed(numb1, numb2):\r\n return f\"Your result: {numb1//numb2}\"", "def divide(numerator, denominator):\n ensure_divisibility(numerator, denominator)\n return numerator // denominator", "def modulus(x, y):\n return x % y", "def test_div(self):\n funcs = ['div', 'div_']\n for func in funcs:\n for tensor_type in [lambda x: x, SharedTensor]:\n tensor1 = get_random_test_tensor()\n tensor2 = get_random_test_tensor(max_value=0.5) + 1.5\n encrypted = SharedTensor(tensor1)\n encrypted2 = tensor_type(tensor2)\n reference = getattr(tensor1, func)(tensor2)\n encrypted_out = getattr(encrypted, func)(encrypted2)\n msg = '%s %s failed' % (\n 'private' if tensor_type is SharedTensor else 'public',\n func)\n self._check(encrypted_out, reference, msg)\n if '_' in func:\n # Check in-place op worked\n self._check(encrypted, reference, msg)\n else:\n # Check original is not modified\n self._check(encrypted, tensor1, msg)", "def __rdivmod__(self, other):\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return (quot, res)\n return NotImplemented", "def beat_division(a,b):\n if b == 0:\n return 0\n return a // b", "def modinverse(a: int, m: int) -> int:\n if SHOW_WORKING: print(f\"modinverse(a, m) = modinverse({a}, {m})\")\n if SHOW_WORKING: print(f\"\\tWe want to find some x & y such that {a} * x + {m} * y = 1\")\n\n if a < 0 or m <= 0:\n raise ValueError(\"a must be non-negative and m must be positive\")\n\n if SHOW_WORKING: print(f\"Find gcd(a, b) = gcd({a}, {m})\")\n if m > a:\n if SHOW_WORKING: print(f\"\\tb > a. Set r1[0] := m = {m} and r2[0] := a = {a} so that r1[0] > r2[0\")\n r1s, r2s = [m], [a]\n else:\n if SHOW_WORKING: print(f\"\\ta >= b. Set r1[0] := a = {a} and r2[0] := m = {m} so that r1[0] >= r2[0]\")\n r1s, r2s = [a], [m] \n\n if SHOW_WORKING: print(f\"\\tProceeding with algorithm until r2 hits 0. 
gcd({a}, {m}) will be the ending r1 value:\")\n qs = []\n i = 0\n while r2s[-1] != 0:\n i += 1\n\n if SHOW_WORKING: print(f\"\\t\\tSet q[{i - 1}] := floor(r1[{i - 1}] / r2[{i - 1}]) = floor({r1s[i - 1]} / {r2s[i - 1]}) = floor({round(r1s[i - 1] / r2s[i - 1], 2)}) = {r1s[i - 1] // r2s[i - 1]}\")\n qs.append(r1s[i - 1] // r2s[i - 1])\n\n if SHOW_WORKING: print(f\"\\t\\tSet (r1[{i}], r2[{i}]) := (r2[{i - 1}], r1[{i - 1}] - r2[{i - 1}] * q[{i - 1}]) = ({r2s[i - 1]}, {r1s[i - 1]} - {r2s[i - 1]} * {qs[i - 1]}) = ({r2s[i - 1]}, {r1s[i - 1] - r2s[i - 1] * qs[i - 1]})\")\n r1, r2 = r2s[i - 1], r1s[i - 1] - r2s[i - 1] * qs[i - 1]\n r1s.append(r1)\n r2s.append(r2)\n\n if SHOW_WORKING: print(\"\\t\\t -\")\n \n if SHOW_WORKING: print(f\"\\tStopping condition hit (r2[{i}] = 0). Result of gcd({a}, {m}) is r1[{i}] = {r1s[-1]}\")\n\n if r1s[-1] != 1:\n if SHOW_WORKING: print(f\"\\t{a} has no inverse modulo {m} because gcd({a}, {m}) = {r1s[-1]} != 1 (they must be coprime)\")\n return None\n\n if SHOW_WORKING: print(f\"\\n\\tBegin working backwards:\")\n\n def getnestedexpressionstr(leftstr: str, nestedr1r2q: List[Union[int, List[int]]], rightstr: str) -> str:\n if SHOW_WORKING: print(f\"\\t\\tgetnestedexpressionstr('{leftstr}', {nestedr1r2q}, '{rightstr}')\")\n r1: int = nestedr1r2q[0]\n r2: Union[int, List[int]] = nestedr1r2q[1]\n q: int = nestedr1r2q[2]\n if SHOW_WORKING: print(f\"\\t\\t\\tr1 = {r1}, r2 = {r2}, q = {q}\")\n\n if isinstance(r2, int):\n return f\"{leftstr}{r1} - {r2} * {q}{rightstr}\"\n \n if leftstr == rightstr == '':\n return getnestedexpressionstr(f\"{r1} - (\", r2, f\") * {q}\")\n\n return getnestedexpressionstr(f\"{leftstr}{r1} - (\", r2, f\") * {q}{rightstr}\")\n\n def backtrack(index: int, nestedr1r2q: List[Union[int, List[int]]]) -> List[Union[int, List[int]]]:\n \"\"\"Provided an index and an ordered list representing the r1, r2, and q values of the equation\n r1 - r2 * q, this function returns another list where r2 has been broken down to the parts of \n its equation on the previous indexed equation, e.g. 
if the 3rd and 4th equations from the GCD \n algorithm are:\n (3): r1 - r2 * q2 = 4 - 4 * 1\n (4): r1 - r2 * q2 = 3 - 1 * 3\n then: \n backtrack(4, [3, 1, 3]) -> [3, [4, 3, 1], 3].\n \n This also works when the middle element of the list (the r2 element) is given as a list of parts,\n e.g., if we follow the previous example where additionally equation 2 is:\n (2): r1 - r2 * q2 = 11 - 4 * 2\n then:\n backtrack(3, [3, [4, 3, 1], 3]) -> [3, [4, [11, 4, 2], 1], 3].\"\"\"\n \n if SHOW_WORKING: print(f\"\\t\\tbacktrack({index}, {nestedr1r2q})\")\n\n if index <= 0:\n raise ValueError(\"Can't backtrack from here, please supply a positive index\")\n \n r1: int = nestedr1r2q[0]\n r2: Union[int, List[int]] = nestedr1r2q[1]\n q: int = nestedr1r2q[2]\n\n if index == 1:\n return [r1, [r1s[0], r2s[0], qs[0]], q]\n\n return [r1, backtrack(index - 1, [r1s[index - 1], r2s[index - 1], qs[index - 1]]), q]\n\n if i - 2 > 0:\n expression = backtrack(i - 2, [r1s[i - 2], r2s[i - 2], qs[i - 2]])\n\n nestedexpressionstr: str = getnestedexpressionstr('', expression, '')\n nestedexpressionstr = nestedexpressionstr.replace(str(a), 'a').replace(str(m), 'm')\n\n if SHOW_WORKING: print(f\"\\t\\t{nestedexpressionstr}\")\n if SHOW_WORKING: print(f\"\\t\\t{sympy.simplify(nestedexpressionstr)}\")\n\n x, y = sympy.core.numbers.igcdex(a, m)[:2]\n if SHOW_WORKING: print(f\"\\ta * x + m * y = 1 -> {a} * {x} + {m} * {y} = 1\")\n\n if SHOW_WORKING: print(f\"\\tmodinverse({a}, {m}) = {x}\\t(mod {m}) = {x % m}\")\n \n return x % m", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]", "def truncate_div_compute(input_x, input_y, output_x,\n kernel_name=\"truncate_div\"):\n shape_list = broadcast_shapes(\n te.lang.cce.util.shape_to_list(input_x.shape),\n te.lang.cce.util.shape_to_list(input_y.shape),\n param_name_input1=\"input_x\", param_name_input2=\"input_y\")\n int_list = (\"int32\", \"int8\", \"uint8\")\n input_dtype = input_x.dtype\n\n if input_dtype in int_list:\n data_zero = te.lang.cce.broadcast(tvm.const(0, 'float32'),\n shape_list[2], 'float32')\n data_x_broad = te.lang.cce.cast_to(input_x, 'float32')\n data_y_broad = te.lang.cce.cast_to(input_y, 'float32')\n data_x_broad = te.lang.cce.broadcast(data_x_broad, shape_list[2])\n data_y_broad = te.lang.cce.broadcast(data_y_broad, shape_list[2])\n res_div = te.lang.cce.vdiv(data_x_broad, data_y_broad)\n res_min_int = te.lang.cce.ceil(te.lang.cce.vmin(res_div, data_zero))\n res_max_int = te.lang.cce.floor(te.lang.cce.vmax(res_div, data_zero))\n res_trunc = te.lang.cce.vadd(res_min_int, res_max_int)\n else:\n if tbe_platform.cce_conf.api_check_support(\"te.lang.cce.vlog\",\n \"float32\"):\n input_x = te.lang.cce.cast_to(input_x, 'float32')\n input_y = te.lang.cce.cast_to(input_y, 'float32')\n data_x_broad = te.lang.cce.broadcast(input_x, shape_list[2])\n data_y_broad = te.lang.cce.broadcast(input_y, shape_list[2])\n res_trunc = te.lang.cce.vdiv(data_x_broad, data_y_broad)\n\n res = te.lang.cce.cast_to(res_trunc, input_dtype)\n\n return res", "def div1(left: float, right: float) -> float:\n return left / right", "def __div__(self, oth):\n\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] / oth, self.rows, self.cols)", "def divide_and_round_up(x, y):\n return ((x - 1) // y) + 1", "def simplify_fraction(a, b):\n c = gcd(a, b)\n return a // c, b // c", "def __ifloordiv__(self, obj):\n # calls __floordiv__\n tmp = self // obj\n self.data = tmp.data\n return self", "def __divmod__(self, other):\r\n return NotImplemented", "def 
mcd(a, b):\n while(b != 0):\n a,b = b,a%b\n return a", "def quotient(left_object, right_object):\n return int(float(left_object)/right_object)", "def __rfloordiv__(self, other):\n return self.__rtruediv__(other)", "def divide(n1, n2):\n return n1 / n2", "def get_divide_ab(a, b): # IN= 2'int' / OUT= 1'foat'\n return float(a/b)", "def __reduce(self):\n if self.denominator <0:\n self.denominator *= -1\n self.numerator *= -1\n gcd = math.gcd(int(self.denominator),int(self.numerator))\n if self.denominator != 0 and self.numerator!= 0:\n if gcd > 0:\n self.denominator /= gcd\n self.numerator /= gcd\n self.numerator = int(self.numerator)\n self.denominator = int(self.denominator)", "def __mod__( self, value ):\r\n\t\tif ( type( value ) == type( self ) ):\r\n\t\t\treturnvalue = fraction( self )\r\n\t\t\tif ( returnvalue < 0 ):\r\n\t\t\t\twhile ( returnvalue < -value ): returnvalue += value\r\n\t\t\telse:\r\n\t\t\t\twhile ( returnvalue > value ): returnvlaue -= value\r\n\t\t\treturn returnvalue\r\n\t\telif ( type( value ) in ( types.IntType, types.LongType ) ):\r\n\t\t\treturn fraction( self.numerator % ( value * self.denominator ), self.denominator )\r\n\t\telif ( type ( value ) == types.FloatType ):\r\n\t\t\treturn float( self ) % value\r\n\t\telse: return NotImplemented", "def __rdiv__(self, _scalar):\n\t\treturn self / _scalar", "def div(a: Decimal, b: Decimal) -> Decimal:\n return a / b", "def mod(dividends, divisor):\n\n output = np.zeros(len(dividends))\n\n for i in tqdm(range(len(dividends))): \n output[i] = dividends[i]\n done=False\n while (not done):\n if output[i] >= divisor:\n output[i] -= divisor\n elif output[i] < 0.:\n output[i] += divisor\n else:\n done=True\n\n return output", "def div2(left: float, right: float) -> float:\n return left / right", "def div(num1, num2):\n return num1 / num2", "def div(num1, num2):\n return num1 / num2", "def div(a, b):\n if b == 0:\n raise ValueError('zero division error')\n return a / b", "def div(a, b):\r\n if type(b) in inttypes_set:\r\n if not b:\r\n return Infinity(a)\r\n raise ZeroDivisionError('%r / %r' % (a, b))\r\n if b == 1:\r\n return a\r\n if type(a) in inttypes_set:\r\n return normalized_fraction(a, b)\r\n return a / b", "def ratio_func(a, b):\n return a / b", "def ceil_division(left_number, right_number):\n\t\t\treturn -(-left_number // right_number)", "def divide(value, arg):\n\treturn float(value) / float(arg)", "def __mod__(self, other: 'SInt') -> 'SInt':\r\n return self.__divmod__(other)[1]", "def test_floordiv():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value // 2\n num_a.value //= 2\n assert num_a.value == new_value", "def round_up_div(value: int, divisor: int) -> int:\n return (value + divisor - 1) // divisor", "def algo(a: int, b: int) -> int:\n\n while b != 0:\n a, b = b, a % b\n return a", "def __rdiv__(self,that):\n B = that if isinstance(that,Factor) else Factor([],that)\n return B.__opExpand2(self, np.divide)", "def test_dividing(self):\n divider = Divider()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n if j != 0:\n self.assertEqual(i/j, divider.calc(j, i))", "def f(a, b, d):\n i = 1\n mod = -1\n\n if d and int(b/d) != b/d:\n # kdyz neni ``b`` delitelne celociselne ``d``, zadny nasobek ``a`` ho nedozene\n # TODO: to je ovsem tezka hypoteza a chtela by matematicky overit\n return mod\n\n while mod != 0:\n mod = (i*a - d) % b\n i += 1\n return i-1" ]
[ "0.6179031", "0.6050849", "0.59951925", "0.5947101", "0.5941931", "0.5891567", "0.58224696", "0.5819864", "0.5762812", "0.57600236", "0.57048416", "0.5681911", "0.5675549", "0.5665095", "0.5660404", "0.5639097", "0.56374323", "0.5618714", "0.56052095", "0.55648005", "0.55648005", "0.55648005", "0.5562422", "0.55584466", "0.55530703", "0.5542015", "0.554139", "0.55280715", "0.5490551", "0.5490219", "0.5489364", "0.54789513", "0.54340833", "0.54340833", "0.54266125", "0.54259443", "0.5401198", "0.539955", "0.53993225", "0.5368386", "0.53682387", "0.5368107", "0.5365745", "0.535906", "0.53527874", "0.5351394", "0.5348705", "0.53474665", "0.534094", "0.53345835", "0.5332507", "0.5324006", "0.53191674", "0.530497", "0.5304076", "0.5300652", "0.5300652", "0.52994764", "0.52887213", "0.52887213", "0.5287483", "0.52827626", "0.52738196", "0.5269865", "0.52671885", "0.5263746", "0.5254343", "0.5245001", "0.52415586", "0.5232721", "0.5230603", "0.5217579", "0.5209972", "0.520491", "0.5203113", "0.51955616", "0.51929253", "0.51818514", "0.51764274", "0.516485", "0.5161023", "0.51451504", "0.51426053", "0.51394135", "0.51343596", "0.5116835", "0.5112349", "0.5112349", "0.5067019", "0.50598216", "0.5058255", "0.50577354", "0.50557595", "0.50524443", "0.5043504", "0.50399375", "0.5031979", "0.5028501", "0.5027543", "0.50230587" ]
0.7103163
0
Given the scale of WGUPS shipping, 1000 is a reasonable backing array size
def __init__(self): self.size = 1000 self.mapArray = [None] * self.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.size = 1000000\n self.mp = [[]] * self.size", "def scale_in(self, count):\n pass", "def limit_size(self, catalog):\n if len(catalog)<=self.limit:\n return catalog\n mem = {}\n for instance in catalog:\n if (instance['vCpu'], math.ceil(instance['memory'])) not in mem:\n mem[(instance['vCpu'], math.ceil(instance['memory']))] = instance\n out = [val for val in mem.values()]\n if len(out)>self.limit:\n out = sorted(out, key=lambda x: x['onDemandUsdPrice'])\n return out[:self.limit]\n return out", "def __len__(self):\r\n return 100000", "def MAXMEM(self):", "def _assign_sizes(self):", "def const_size():\r\n return 1000.0", "def __len__(self):\n return max(self.A_size, self.B50_size, self.B100_size, self.B150_size)", "def memory(self):\n mem_size_list = []\n gig_size = self.random.randint(1,32)\n size = gig_size * 1073741824\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n mem_size_list.append(f\"{size:.2f} {suffixes[suffixIndex]}\")\n return mem_size_list", "def scale(self):", "def get_additional_ball_capacity(self):\n return 999", "def test_batch_size_pack_size():", "def _shrink_arr(self):\n self._resize_arr(self._capacity // self._growth_factor)", "def Capacity(self) -> int:", "def numberOfPoints(self):\n return 20000", "def get_additional_ball_capacity(cls):\n return 999", "def subbandwidth(self):", "def minsize(self):# -> int:\r\n return 0", "def _shrink(self):\n self.capacity = round(self.capacity / self.factor)\n temp = [None] * self.capacity\n for i in range(self.capacity):\n temp[i] = self.store[i]\n self.store = temp", "def disk(self):\n disk_size_list = []\n precision = 2\n size = self.random.randint(1, 1099511627776)\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n disk_size_list.append(f\"{size:.2f} {suffixes[suffixIndex]}\") #.format(size,suffixes[suffixIndex]))\n return disk_size_list", "def record_batch_size(self):\n return 10000", "def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp", "def cpu_k_space_fq_allocation(n, sv, mem):\n return int(math.floor(\n float(.8 * mem - 4 * sv * n - 12 * n) / (4 * (3 * sv + 4))\n ))", "def __len__(self):\n return 9 # logsfr_ratios has 6 bins", "def test_2d_lowmem():\n dic, data = ng.bruker.read_lowmem(os.path.join(DATA_DIR, \"bruker_2d\"))\n assert dic['FILE_SIZE'] == 3686400\n assert data.shape == (600, 768)\n assert round(data[0, 40].real, 2) == 28.0\n assert round(data[0, 40].imag, 2) == -286.0\n assert round(data[13, 91].real, 2) == -7279.0\n assert round(data[13, 91].imag, 2) == -17680.0\n lowmem_write_readback(dic, data)", "def __init__(self, size=800, scale=(3. / 4, 5. 
/ 2)):\n assert isinstance(size, int)\n assert isinstance(scale, float) or isinstance(scale, tuple)\n self.size = size\n self.scale = scale if isinstance(scale, tuple) \\\n else (1 - scale, 1 + scale)", "def GetScaleBlocks(width):\n\n rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a", "def __init__(self, maxSize): \n self.front = 0\n self.end = 0\n self.qSize = 0\n self.data = arr('i', (0 for i in range(maxSize)))", "def fix_size(value):\n try:\n obj_size = int(float(value) * wx.GetApp().settings.size_coeff)\n except AttributeError:\n obj_size = int(value)\n return obj_size", "def SendPacketsSendSize(self) -> int:", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def GetScale(self):\n ...", "def __len__(self) -> int:\n return self.disp_size ** 2", "def calc_size(self):\r\n pass", "def x_size(self):\n pass", "def recommended_max_num_datapoints(self) -> int:\n # very large number, essentially no limit by default\n return 1e9", "def pack_unpack_extreme():\n # this will create a huge array, and then use the\n # blosc.BLOSC_MAX_BUFFERSIZE as chunk-szie\n pack_unpack(300, chunk_size=blosc.BLOSC_MAX_BUFFERSIZE,\n progress=simple_progress)", "def _get_free_capacity(self):\n\n capacity = np.ones(len(self.grid.T)) * len(self.grid)\n capacity -= np.count_nonzero(self.grid, axis=0)\n return capacity", "def __init__(self):\n self.capacity = 1000\n self.data = [None]*self.capcity", "def scale(self):\n return self._gev_bijector.scale", "def unitSize( cls, value, typeCode=None ):\n return value.shape[-1]", "def get_scale():\r\n\r\n \r\n return 0.5", "def reduceScale(structure,scale,dim,prec=1E-2):\n\n structure.translate_sites(indices=range(structure.num_sites),\n vector=-1*structure.sites[0].frac_coords+[0,0,.5])\n\n specs = structure.species\n cart = [x.coords for x in structure.sites]\n\n lat = np.array(structure.lattice.as_dict()['matrix'])\n if type(scale)==type(1) or type(scale)==type(1.0):\n lat[0]*=scale\n if dim>=2:\n lat[1]*=scale\n if dim==3:\n lat[2]*=scale\n else:\n lat[0]*=scale[0]\n lat[1]*=scale[1]\n lat[2]*=scale[2] \n\n fracs = np.around(abs(np.array(np.linalg.solve(lat.T,np.array(cart).T).T)),4)\n\n specs = []\n u_cart = []\n i=0\n n_fracs = []\n for frac in fracs:\n if not np.any([magni(np.around(frac,4)%1-np.around(x,4)%1)<prec for x in n_fracs]):\n n_fracs.append(frac%1)\n #if frac[0]<1 and frac[1]<1 and 
frac[2]<1:\n \n specs.append(structure.species[i])\n u_cart.append(cart[i])\n i+=1\n \n new_sites = []\n i=0\n for site in u_cart:\n p = PeriodicSite(lattice = Lattice(lat),coords=site,\n coords_are_cartesian=True,\n species=specs[i])\n new_sites.append(p)\n i+=1\n\n new_struct = Structure.from_sites(new_sites)\n\n return(new_struct)", "def getSize(self):\n return 1", "def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()", "def update_size(self):\n return 3 + self.memory_unit_size", "def __init__(self, size):\n self._storage = []\n self._maxsize = size", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def __sizeof__(self):\r\n\r\n S = 0 # Full size of the object\r\n if self.loss_list is not None:\r\n for value in self.loss_list:\r\n S += getsizeof(value)\r\n if self.meshsol_list is not None:\r\n for value in self.meshsol_list:\r\n S += getsizeof(value)\r\n if self.loss_index is not None:\r\n for key, value in self.loss_index.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.logger_name)\r\n if self.axes_dict is not None:\r\n for key, value in self.axes_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.Pstator)\r\n S += getsizeof(self.Protor)\r\n S += getsizeof(self.Pmagnet)\r\n S += getsizeof(self.Pprox)\r\n S += getsizeof(self.Pjoule)\r\n if self.coeff_dict is not None:\r\n for key, value in self.coeff_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n return S", "def tx_set_size_500():\n print('Setting transaction set size to 500')\n upgrade('maxtxsize', 'max_tx_set_size', 500)", "def Sizes(self, with_sign=False):\n\n self.__do_essential_memebers_exist__()\n\n try:\n from Florence import DisplacementFormulation\n except ImportError:\n raise ValueError(\"This functionality requires Florence's support\")\n\n if self.element_type != \"line\":\n # FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED\n formulation = DisplacementFormulation(self)\n sizes = np.zeros(self.nelem)\n if not with_sign:\n for elem in range(self.nelem):\n LagrangeElemCoords = self.points[self.elements[elem,:],:]\n sizes[elem] = formulation.GetVolume(formulation.function_spaces[0],\n LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)\n else:\n for elem in range(self.nelem):\n LagrangeElemCoords = self.points[self.elements[elem,:],:]\n sizes[elem] = formulation.GetSignedVolume(formulation.function_spaces[0],\n LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)\n return sizes\n\n else:\n warn(\"Sizes of line elements could be incorrect if the mesh is curvilinear\")\n return self.Lengths()", "def _scale_back(self, update_heap_pointers=True):\n # Running total for the new heap size\n heapsize = 0\n\n for indx, name in enumerate(self.dtype.names):\n column = self._coldefs[indx]\n recformat = column.format.recformat\n raw_field = _get_recarray_field(self, indx)\n\n # add the location offset of the heap area for each\n # variable length column\n if isinstance(recformat, _FormatP):\n # Irritatingly, this can return a different dtype than just\n # doing np.dtype(recformat.dtype); but this returns the results\n # that we want. 
For example if recformat.dtype is 'a' we want\n # an array of characters.\n dtype = np.array([], dtype=recformat.dtype).dtype\n\n if update_heap_pointers and name in self._converted:\n # The VLA has potentially been updated, so we need to\n # update the array descriptors\n raw_field[:] = 0 # reset\n npts = [np.prod(arr.shape) for arr in self._converted[name]]\n\n raw_field[: len(npts), 0] = npts\n raw_field[1:, 1] = (\n np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize\n )\n raw_field[:, 1][:] += heapsize\n\n heapsize += raw_field[:, 0].sum() * dtype.itemsize\n # Even if this VLA has not been read or updated, we need to\n # include the size of its constituent arrays in the heap size\n # total\n if type(recformat) == _FormatP and heapsize >= 2**31:\n raise ValueError(\n \"The heapsize limit for 'P' format has been reached. \"\n \"Please consider using the 'Q' format for your file.\"\n )\n if isinstance(recformat, _FormatX) and name in self._converted:\n _wrapx(self._converted[name], raw_field, recformat.repeat)\n continue\n\n scale_factors = self._get_scale_factors(column)\n _str, _bool, _number, _scale, _zero, bscale, bzero, _ = scale_factors\n\n field = self._converted.get(name, raw_field)\n\n # conversion for both ASCII and binary tables\n if _number or _str:\n if _number and (_scale or _zero) and column._physical_values:\n dummy = field.copy()\n if _zero:\n dummy -= bzero\n if _scale:\n dummy /= bscale\n # This will set the raw values in the recarray back to\n # their non-physical storage values, so the column should\n # be mark is not scaled\n column._physical_values = False\n elif _str or isinstance(self._coldefs, _AsciiColDefs):\n dummy = field\n else:\n continue\n\n # ASCII table, convert numbers to strings\n if isinstance(self._coldefs, _AsciiColDefs):\n self._scale_back_ascii(indx, dummy, raw_field)\n # binary table string column\n elif isinstance(raw_field, chararray.chararray):\n self._scale_back_strings(indx, dummy, raw_field)\n # all other binary table columns\n else:\n if len(raw_field) and isinstance(raw_field[0], np.integer):\n dummy = np.around(dummy)\n\n if raw_field.shape == dummy.shape:\n raw_field[:] = dummy\n else:\n # Reshaping the data is necessary in cases where the\n # TDIMn keyword was used to shape a column's entries\n # into arrays\n raw_field[:] = dummy.ravel().view(raw_field.dtype)\n\n del dummy\n\n # ASCII table does not have Boolean type\n elif _bool and name in self._converted:\n choices = (\n np.array([ord(\"F\")], dtype=np.int8)[0],\n np.array([ord(\"T\")], dtype=np.int8)[0],\n )\n raw_field[:] = np.choose(field, choices)\n\n # Store the updated heapsize\n self._heapsize = heapsize", "def receptive_field_size(total_layers):\n return 2**total_layers", "def __init__(self, size: int):\r\n self.l = []\r\n self.sz = size\r\n self.sm = 0", "def _grow(self): \n limit = 0\n #Iterating through the list to find the number of elements\n for i in xrange(len(self)):\n if self._items[i] != self._fillValue:\n #There's an element at index i, so update the limit\n limit = i\n \n #Only grow the array if the limit+1 and the physical size is the same.\n if limit+1 == len(self):\n temp = Array(len(self)*2)\n \n #Copy existing elements to the new Array\n for i in xrange(len(self)):\n temp[i] = self._items[i]\n \n #Initialize the new elements to the fillValue\n for j in xrange(len(self), len(self)*2):\n temp[j] = self._fillValue\n self._items = temp", "def state(self):\n decimals = 2\n size_mb = round(self._size/1e6, decimals)\n return size_mb", "def scale(self):\n return 
self._scale", "def _get_final_size(param_grid):\n tmp = {} # same pattern than param_grid but store the size\n for idx, key in enumerate(param_grid.iterkeys()):\n if isinstance(param_grid[key], list):\n tmp[idx] = [sys.getsizeof(value) for value in param_grid[key]]\n else:\n tmp[idx] = [sys.getsizeof(param_grid[key])]\n return np.array([x for x in itertools.product(*tmp.values())]).sum()", "def order_platescale(self, order_vec, binning=None):\n return np.full(order_vec.size, 0.15)", "def test_processing():\n # It's always harder with a small mailbox:\n strax.Mailbox.DEFAULT_MAX_MESSAGES = 2\n for request_peaks in (True, False):\n for peaks_parallel in (True, False):\n for max_workers in (1, 2):\n Peaks.parallel = peaks_parallel\n print(f\"\\nTesting with request_peaks {request_peaks}, \"\n f\"peaks_parallel {peaks_parallel}, \"\n f\"max_workers {max_workers}\")\n\n mystrax = strax.Context(storage=[],\n register=[Records, Peaks])\n bla = mystrax.get_array(\n run_id=run_id,\n targets='peaks' if request_peaks else 'records',\n max_workers=max_workers)\n assert len(bla) == recs_per_chunk * n_chunks\n assert bla.dtype == (\n strax.peak_dtype() if request_peaks else strax.record_dtype())", "def __len__(self):\n\t\treturn 8", "def __init__(self, *args, **kwargs):\n super(SL1024AFR98, self).__init__(\n max_seq_len=1024, alloc_free_ratio=.98, *args, **kwargs)", "def bands(self) -> int:\n ...", "def ship_rate(self):\n\t\treturn self.industry * (self.manufacturing.level + 5) / 24.0", "def getSize(self):\n return 1", "def min_size(self):\n raise NotImplementedError()", "def get_nlr_size():\n\treturn 4.1 * u.kpc", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def __init__(self, w_scale = None):\n self.n = 4\n self.m = 2\n self.w_scale = w_scale\n if self.w_scale is None:\n self.w_scale = np.ones(self.n)", "def get_signal_gwgds1072au(a_signal_packed: bytes, a_scale : float ) -> list:\n the_return = None\n the_signal_packed=a_signal_packed\n the_scale=a_scale\n the_signal_sequence=[]\n the_signal=0.0 #TODO reminder check this before allowing it\n the_info=[]\n n=4\n bla=0\n blb=bla+n\n print(the_signal_packed)\n JX=unpack('>%sh' % 2 ,the_signal_packed[bla:blb])\n for ii in range(0,2003):\n the_info.append(unpack('>%sh' % 2 ,the_signal_packed[bla:blb])[0])\n bla=bla+n\n blb=blb+n\n #TODO get the potential scale\n #TODO get the offset\n #TODO get the time scale\n\n return the_info", "def arrayManipulation_shortpeak(n, queries):\n a_s = []\n b_s = []\n k_s = []\n\n for i, row in enumerate(queries):\n a_s.append(row[0])\n b_s.append(row[1])\n k_s.append(row[2])\n\n # breakpoint()\n x = a_s + b_s\n all_indices = list(set(x))\n all_indices.sort()\n short_arr = [0] * len(all_indices)\n\n # mapping index of n-long array to index of shorter array\n index_lookup = {}\n for j, el in enumerate(all_indices):\n index_lookup[el] = j\n\n # breakpoint()\n for m in range(len(a_s)):\n short_arr[index_lookup[a_s[m]]] += k_s[m]\n short_arr[index_lookup[b_s[m]]] -= k_s[m]\n\n maxval = 0\n cumsum = 0\n for i, el in enumerate(short_arr):\n cumsum += el\n maxval = max(maxval, cumsum)\n\n print(f'{maxval: <15,d}: Max value')\n arr_size = short_arr.__sizeof__() / 1000000\n total = ((a_s.__sizeof__() / 1000000)\n + b_s.__sizeof__() / 1000000\n + k_s.__sizeof__() / 1000000\n + queries.__sizeof__() / 1000000\n + index_lookup.__sizeof__() / 1000000\n + short_arr.__sizeof__() / 1000000)\n print(f'{total: <15.2f}: All objects size(MB)')\n print(f'{arr_size: 
<15.2f}: Array size(MB)')\n return maxval, arr_size", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def _grow_arr(self):\n self._resize_arr(self._capacity * self._growth_factor)", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def estimate_size(shape):\n total_bytes = reduce(np.multiply, shape) * 8\n return total_bytes / 1E6", "def batch_size(self) -> int:\n ...", "def sn_size(self, val):\n if isinstance(val, int) and val >= 1:\n if val != self._faux._sn_size:\n self._faux._sn_size = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")", "def test_wpa2_personal_ssid_dw_batch_size_125_5g(self, lf_test, get_vif_state, lf_tools):\n # run wifi capacity test here\n profile_data = setup_params_general[\"ssid_modes\"][\"wpa2_personal\"][1]\n ssid_name = profile_data[\"ssid_name\"]\n mode = \"BRIDGE\"\n vlan = 1\n allure.attach(name=\"ssid-rates\", body=str(profile_data[\"rate-limit\"]))\n get_vif_state.append(ssid_name)\n if ssid_name not in get_vif_state:\n allure.attach(name=\"retest,vif state ssid not available:\", body=str(get_vif_state))\n pytest.xfail(\"SSID NOT AVAILABLE IN VIF STATE\")\n lf_tools.add_stations(band=\"5G\", num_stations=5, dut=lf_tools.dut_name, ssid_name=ssid_name)\n lf_tools.Chamber_View()\n wct_obj = lf_test.wifi_capacity(instance_name=\"test_client_wpa2_BRIDGE_tcp_dl\", mode=mode, vlan_id=vlan,\n download_rate=\"1Gbps\", batch_size=\"1,2,5\",\n upload_rate=\"0Gbps\", protocol=\"UDP-IPv4\", duration=\"60000\")\n\n report_name = wct_obj.report_name[0]['LAST'][\"response\"].split(\":::\")[1].split(\"/\")[-1]\n\n lf_tools.attach_report_graphs(report_name=report_name)\n print(\"Test Completed... 
Cleaning up Stations\")\n assert True", "def scale(self):\n return self._a", "def units(self, size=\"G\", transfer='GB/s'): # YAML", "def d(self):\r\n return self.size.z", "def test_mie_length(self):\n kr = np.array([1, 2])\n lmin = 5\n lmax = 10\n x, y = bessel_sk.mie_bessels(kr, lmin, lmax)\n self.assertEqual(x.shape, (len(kr), lmax-lmin+1))\n self.assertEqual(y.shape, (len(kr), lmax-lmin+1))", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def group_sizes(self):\n return self.g_sizes", "def castSize(self, scale):\n return self.camera.sensorSize * scale", "def get_shipping_weight(self):\n pass", "def _s2bl(size):\n return size**2 // 8 + 1", "def __init__(self):\n self.data = []\n self.min = sys.maxsize", "def sn_size(self):\n return self._faux._sn_size", "def test_9(self):\n for _ in range(1000):\n num_free = np.random.randint(1, 100)\n values = np.random.uniform(-1000.0, 1000.0, size=num_free)\n py = get_scales_magnitudes(values)\n f90 = fort_debug.wrapper_get_scales_magnitude(values, num_free)\n assert_almost_equal(py, f90)", "def test_7_medium(self):\n grid_S = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..12......34.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..34......21.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(36,sum(a.cost for a in plan))", "def SendBufferSize(self) -> int:", "def SendBufferSize(self) -> int:", "def dual_size(k_max: int):\n n = 2 * k_max + 1\n return n" ]
[ "0.56507504", "0.5521999", "0.5457273", "0.5385491", "0.537851", "0.5337603", "0.5316058", "0.5289358", "0.5258439", "0.52287763", "0.52238894", "0.5206532", "0.51933736", "0.5175065", "0.5167415", "0.5160229", "0.514245", "0.509487", "0.50677186", "0.5030789", "0.50188196", "0.50152034", "0.50022423", "0.49988207", "0.4989357", "0.49839726", "0.4977387", "0.49738973", "0.4973375", "0.49632075", "0.49410614", "0.493649", "0.4934313", "0.4920182", "0.4918092", "0.4914635", "0.491018", "0.49098632", "0.49050432", "0.48914367", "0.48728308", "0.48714593", "0.48635384", "0.486196", "0.4860353", "0.48496565", "0.48339322", "0.48319808", "0.48298025", "0.4828908", "0.4815672", "0.48112515", "0.48110747", "0.48058584", "0.4796453", "0.47938222", "0.47794363", "0.47783086", "0.47753134", "0.4771752", "0.47621667", "0.4756332", "0.47436485", "0.474361", "0.4743115", "0.47406203", "0.47364596", "0.47303268", "0.47228974", "0.47219402", "0.47191623", "0.47161725", "0.47158346", "0.47158346", "0.47158346", "0.47158346", "0.47140554", "0.47118038", "0.47114432", "0.47107744", "0.4709053", "0.47072095", "0.47069192", "0.470539", "0.47052813", "0.46947104", "0.46937087", "0.46937087", "0.46937087", "0.46919933", "0.46896362", "0.46853453", "0.46783692", "0.46778506", "0.46757278", "0.46744406", "0.46729755", "0.46721682", "0.46721682", "0.46705502" ]
0.53057754
7
Hypothesis on the third feature
def compute_determinant(matrix):
    det = np.linalg.det(matrix)
    #if det == 0.:
    #    # The det = 0 could be related to the third feature
    #    det = np.linalg.det(matrix[:2, :2])
    if det == 0.:
        # Singular covariance matrix, should not be taken into account
        det = np.nan
    if np.isclose(det, 0):
        det = np.abs(det)
    return det
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hypotenuse(a, b):\r\n return (a**2 + b**2)**0.5", "def hypotenuse():\n print(math.sqrt(5**2 +3**2))", "def hypotenuse():\n print(math.sqrt(5*5 + 3*3))", "def hyp(a,b):\n c=sqrt((a*a)+(b*b))\n return c", "def hypothenuse(x, y):\n return sqrt(x**2 + y**2)", "def hypotenuse(x,y):\n xx = multiply(x, x)\n yy = multiply(y, y)\n zz = add(xx, yy)\n return sqrt(zz)", "def f(self, X):\n\n return (X[0])**3 - (X[1])**2 + 1", "def hx(self, x):\n yp = np.sqrt(x[0] ** 2 + x[2] ** 2)\n return yp", "def h2_potential(dist: float) -> float:\n pass", "def hapax(self):\n return self.nlegomena(1)", "def get_hypotenuse(a, b):\n return math.sqrt((a**2) + (b**2))", "def euc_dist(self, squared=True):", "def H_complicated(x):\n _ = x**2\n _[0] += np.sin(2*x[1]*x[0])\n _[1] += -3*x[0]**3 + np.log(np.abs(x[0]))\n return _", "def message(self, features, prediction):\r\n if prediction >= 0 and prediction < 2.5:\r\n change_multiplier = 1\r\n elif prediction >= 2.5 and prediction < 5:\r\n change_multiplier = 0.5\r\n elif prediction >= 5 and prediction < 7.5:\r\n change_multiplier = 0.25\r\n else:\r\n change_multiplier = 0.125\r\n\r\n self.check_all_hypothetical(features, change_multiplier , prediction)", "def hangerfunc_old(p,x):\n return ((4.*((x-p[0])* p[1]/p[0])**2. +p[2]**2.)/(1.+4.*((x-p[0])* p[1]/p[0])**2.))*p[3]", "def two_of_three(a, b, c):\n \"*** YOUR CODE HERE ***\"\n return pow(a, 2) + pow(b,2) + pow(c,2) - pow(min(a,b,c),2)", "def enthalpy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n h = g - temp*g_t\n return h", "def H(self, dstrct):\n new_pop = self.population + dstrct.current_pop\n score = (self.x - new_pop)/self.x\n # Here the absolute value tries to prevent a district that only\n # needs a minor increase in population taking the most populous pt.\n score = 1-abs(score)\n self.H_ = self.w_H * score\n return self.H_", "def hss(self):\n return 2 * (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / (\n (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 1] + self.table[1, 1]) +\n (self.table[0, 0] + self.table[1, 0]) * (self.table[1, 0] + self.table[1, 1]))", "def mo_six_hump_camel_func(x, y):\n x1 = x\n x2 = y\n term1 = (4-2.1*x1**2+(x1**4)/3) * x1**2\n term2 = x1*x2\n term3 = (-4+4*x2**2) * x2**2\n # y0 = np.linalg.norm(xvals)\n\n return term1, term2, term3", "def P_in(triple):\n return np.sqrt(triple.a1**3 / (triple.m1 + triple.m2))", "def front_column_model_p_gain():", "def homo(a):\n return [ a[0]/a[3],\n a[1]/a[3],\n a[2]/a[3],\n 1 ]", "def chi(Mu, Y):\n return Y*(1-hg2f3(Mu,Y))", "def hybolic_distance(self, index, x, y):\n r1, t1 = self.hyperspace[index][x]\n r2, t2 = self.hyperspace[index][y]\n delta_t = (pi - fabs(pi - fabs(t1 - t2))) + 0.0001\n d = r1 + r2 + 2.0 * log(delta_t / 2.0)\n #d = acosh(cosh(r1) * cosh(r2) - sinh(r1) * sinh(r2) * cos(delta_t))\n T, R = self.hyperspace_TR[index]\n #p = 100.0 / (1.0 + exp((d - R) / (2.0 * T))) # percent\n p = 1.0 if d >= R else 0.0\n return p", "def make_hypothetical_prediction(self, features, feature_ind, change):\r\n hypothetical_features = features.copy()\r\n hypothetical_features[feature_ind] += change\r\n return self.make_prediction(hypothetical_features)", "def _etap(self,x):\n return self._eta_sfr_scaling(x,'p_cool') + self._eta_sfr_scaling(x,'p_hot')", "def hchg(x, a1, a2, mu1, mu2):\n a = a1 + a2\n j = np.arange(250)\n if np.isscalar(x):\n x = np.array([x])\n x = x[:, np.newaxis]\n \n out = (mu1 * x) ** j / sp.factorial(j)\n out *= sp.poch(a1+a2, j) / sp.poch(a1, j)\n out *= 
sp.hyp1f1(a1+a2+j, a2, mu2*(1-x))\n out = out.sum(axis=1)\n return out if out.size > 1 else float(out)", "def k3(self) -> float:\n return self.distortion_coefficients[2]", "def overpotential3(x, doh):\n # | - overpotential3\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, dooh - (x + doh), -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n\n #return doh*do\n #__|", "def overpotential3(x, doh):\n # | - overpotential3\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, dooh - (x + doh), -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n\n #return doh*do\n #__|", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def coefficient(self) -> float:\n ...", "def haar_feature(i, x, y, f, s):\n features = np.array([[2, 1], [1, 2], [3, 1], [1, 3], [2, 2]])\n h = features[f][0]*s\n w = features[f][1]*s\n\n if f == 0:\n bright = (i[int(x+h/2-1), y+w-1] + i[x-1, y-1]) - (i[x-1, y+w-1] + i[int(x+h/2-1), y-1])\n dark = (i[x+h-1, y+w-1] + i[int(x+h/2-1), y-1]) - (i[int(x+h/2-1), y+w-1] + i[x+h-1, y-1])\n elif f == 1:\n bright = (i[x+h-1, int(y+w/2-1)] + i[x-1, y-1]) - (i[x-1, int(y+w/2-1)] + i[x+h-1, y-1])\n dark = (i[x+h-1, y+w-1] + i[x-1, int(y+w/2-1)]) - (i[x+h-1, int(y+w/2-1)] + i[x-1, y+w-1])\n #print(bright)\n #print(dark)\n haar_feature_val = bright-dark\n #print(haar_feature_val)\n return haar_feature_val", "def cigtab(self, y):\r\n X = [y] if np.isscalar(y[0]) else y\r\n f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]\r\n return f if len(f) > 1 else f[0]", "def egim_hesapla(x1, y1, x2, y2):\n\tsonuc = (y2 - y1) / (x2 - x1)\n\tprint float(sonuc)", "def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def weight(self):", "def find_hypotenuse(x, y):\n return math.sqrt(x**2 + y**2)", "def myHeuristic2(state, problem=None):\n #print(\"myHeuristic2\")\n #print(problem.isGoalState((1,1)))\n xy2 = problem.goal\n return ( (state[0] - xy2[0]) ** 2 + (state[1] - xy2[1]) ** 2 ) ** 0.5", "def ThetaFunc(self, x):\n return 0.5 * (np.sign(x) + 1)", "def pythrule(first, second):\n\n return (first * second) / 2", "def feature_energy(wv):\n return np.sqrt(np.sum(wv ** 2, axis=0)).T", "def test_H_hat(self):\n\t\tposition = [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477]\n\t\tpotential = [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0]\n\t\tc = 1\n\t\tposition = tf.constant(position, shape = [1, len(position)], dtype = tf.float32)\n\t\tpotential = tf.constant(potential, shape = [1, len(potential)], dtype = tf.float32)\n\t\tbasis = schrodinger.create_basis(5)\n\t\tv = schrodinger.v0(position, potential, basis)\n\t\tcoeff = schrodinger.coefficient(position, basis)\n\t\tv0_hat = tf.linalg.solve(coeff, v)\n\t\tH = schrodinger.H_hat(c, len(basis), v0_hat)\n\t\tself.assertEqual(coeff.get_shape(), [len(basis), len(basis)])", "def homozygotie(self):\n if self.allele[1] == 0.0:\n self.homozygote = True", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val 
>= 0\n\n return val\n else:\n return 0.0", "def hx(self, xhat):\n zp = np.sqrt(xhat[0] ** 2 + xhat[2] ** 2)\n return zp", "def Tc (x,infin, a, nu):\r\n return infin + a* (x ** (-1/nu))", "def harm(x,y):\n return x*y/(x+y)", "def _hypothesis(self, X):\n # * is element wise multiplication\n # numpy.dot(), or @ operator will work\n result = np.transpose(self.theta)@ X \n #emptyResult = np.zeros((1,X.shape[1]))\n return result", "def H(self, t, z):\n # assert (t.ndim == 0) and (z.ndim == 2)\n assert z.size(-1) == 2 * self.n_dof * self.dof_ndim\n\n x, p = z.chunk(2, dim=-1)\n x = x.reshape(-1, self.n_dof, self.dof_ndim)\n p = p.reshape(-1, self.n_dof, self.dof_ndim)\n\n T = EuclideanT(p, self.Minv)\n V = self.compute_V(x)\n return T + V", "def six_hump_camel_func(x, y):\n x1 = x\n x2 = y\n term1 = (4-2.1*x1**2+(x1**4)/3) * x1**2\n term2 = x1*x2\n term3 = (-4+4*x2**2) * x2**2\n\n return term1 + term2 + term3", "def complex_permittivity(self,freq=0,include_conductivity=False):\n if freq == 0:\n return self.epsilon\n elif freq > 0:\n w=2*np.pi*freq\n if include_conductivity:\n return self.epsilon*(1 + 1j * (self.loss_tangent+w/self.conductivity))\n else:\n return self.epsilon*(1 + 1j * self.loss_tangent)", "def functionality(self):\n self._functionality = 0.12 * self.CAMC + 0.22 * self.NOP + 0.22 * self.CIS + 0.22 * self.DSC + 0.22 * self.NOH\n return round(self._functionality, 5)", "def P_out(triple):\n return np.sqrt(triple.a2**3 / (triple.m1 + triple.m2 + triple.m3))", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n 
number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def kappa_t(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_pp = liq_g(0,2,temp,pres)\n kappa = -g_pp/g_p\n return kappa", "def __abs__(self):\n return hypot(self.x, self.y)", "def T(p, x):\n y = alpha * mean(p(W))\n if y <= P(x): \n return P(x) \n h = lambda r: alpha * mean(p(alpha*(x - D(r)) + W))\n return fix_point(h, P(x), y)", "def hypot(x, y):\n return 0.0", "def pent( a, b ):\n return P(a) - b", "def heavi(x):\n return 0.5 * (np.sign(x) + 1)", "def hg2f3(Mu,Y):\n return float(mpmath.hyp2f3(0.5,0.5,1.5,2,Mu,-Y**2))", "def get_specific_heat() -> float:\n return 1006.0", "def func3(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2) + (1/2.)*(1./h**2)*(y_temp[(i+1)%N]-2*y_temp[i%N]+y_temp[(i-1)%N])\n return k", "def _holt__(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1]))\n return sqeuclidean(l, y)", "def fowlkes_mallows(self):\n return self.pairwise.ochiai_coeff()", "def _model(x, p):\n y_hat = 0\n for i, pi in enumerate(reversed(p)):\n y_hat += x**i * pi\n return y_hat", "def definitive_hotT_method():\n\tT, Xs = load_manticore(soln_2p_plus046, frames=(1, 5))\n\thist_limits = (10, 20)\n\thistxy, stats = gen_hist_and_stats(T,\n\t\t(mtc.get_pacs_mask() & (Xs < 1)),\n\t\tx_lim=hist_limits,\n\t\tsetting=-2\n\t)\n\thot_T = stats[0]\n\terr_hotT = stats[1]\n\treturn hot_T, err_hotT", "def _heuristic(a, b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2", "def see(p, y, yHat):\n n = y.shape[0]\n numer = ((y - yHat) ** 2).sum()\n denom = n - p - 1\n if (denom == 0):\n s = 0\n elif ( (numer / denom) < 0 ):\n s = 0.001\n else:\n s = (numer / denom) ** 0.5\n return s", "def y(x):\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4", "def two_of_three(a, b, c):\n \"*** YOUR CODE HERE ***\"\n\n if a>=b:\n x=a\n if b>=c:\n y=b\n else:\n y=c\n else:\n x=b\n if a>=c:\n y=a\n else:\n y=c\n\n return x*x+y*y", "def fitness_key(self, x):\n dv, c3, ventr = x.population.champion.f\n #print dv, c3, ventr\n if dv == 0 and ventr == 0:\n return -10000.0/c3\n else:\n return dv+ c3+ ventr", "def add_complementary_term(self):\n \n approx_Z = 0\n \n non_data_samples = False\n \n if self.num_samples > 0:\n \n non_data_samples = True\n \n approx_Z = self.add_is_approximation()\n \n return approx_Z, non_data_samples", "def homo_line(a, b):\n return (a[1] - b[1], b[0] - a[0], a[0] * b[1] - a[1] * b[0])", "def g(self, X):\n\n return (X[0])**2 - 2*X[0] + X[1]**3 - 2", 
"def feature_dist_func_dict():\n return {\"tanimoto_dissimilarity\": tanimoto_dissimilarity}", "def hat(J):\n hat=math.sqrt(2*J+1)\n return hat", "def H(x, X, Y, dY):\n\n def L(i):\n #return p[i] * (x ** i)\n p = [(x - X[i]) / (X[j] - X[i]) for j in range(n) if j != i]\n return reduce(op.mul, p)\n\n def dL(i):\n #return d[i-1] * (x ** (i-1))\n if i < n-1:\n return (Y[i+1] - Y[i]) / (X[i+1] - X[i])\n else:\n return (Y[i] - Y[i-1]) / (X[i] - X[i-1])\n\n def A(i):\n return (1 - 2 * (x - X[i]) * dL(i)) * (L(i) ** 2)\n\n def B(i):\n return (x - X[i]) * (L(i) ** 2)\n\n assert(len(X) != 0 and len(X) == len(Y)), 'Quantidade de valores em X e Y diferentes'\n n = len(X)\n #p = interpolate.lagrange(X, Y)\n #d = polyder(p)\n h1 = sum(A(i) * Y[i] for i in range(n))\n h2 = sum(B(i) * dY[i] for i in range(n))\n return h1 + h2", "def poincare_to_hyperbolic(y):\n norm = np.linalg.norm(y)\n x = np.zeros(y.shape)\n x[:-1] = y\n x[-1] = (1 + (norm ** 2)) / 2\n return (2 / (1 - (norm ** 2))) * x", "def prob1():\n x, y = sy.symbols('x, y')\n return sy.Rational(2,5) * sy.exp(x**2 - y) * sy.cosh(x + y) + \\\n sy.Rational(3,7) * sy.log(x*y + 1)", "def probit_phi(x):\n mu = 0;sd = 1;\n return 0.5 * (1 + tsr.erf((x - mu) / (sd * tsr.sqrt(2))))", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def arctanh(x):\n raise NotImplementedError", "def phs(x, y, rbfParam) :\n return (x**2 + y**2) ** (rbfParam/2)", "def conditional_entropy_hyper(self) -> float:\n pass", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def entropy_difference(feature, answers, num_lemma):\n f_max = np.max(feature)\n f_min = np.min(feature)\n # check is it unsound feature\n if f_max == f_min:\n # print('lemma 0: ', num_lemma)\n return 10000\n step = (f_max - f_min) / 1000\n p = [[0, 0] for _ in range(1000)]\n sum_p = len(feature)\n for j in range(len(feature)):\n index = math.trunc((feature[j] - f_min)/step)\n if index == 1000:\n index = 999\n p[index][answers[j]] += 1\n # difference between entropy feature+answers and just feature\n result = 0\n for i in range(1000):\n if (p[i][0] != 0) & (p[i][1] != 0):\n result += math.log2((p[i][0] + p[i][1]) / sum_p) * (p[i][0] + p[i][1]) / sum_p - \\\n math.log2(p[i][0] / sum_p) * (p[i][0]) / sum_p - \\\n math.log2(p[i][1] / sum_p) * (p[i][1]) / sum_p\n # entropy answers\n all_answers = len(answers)\n positive_answers = sum(answers) / all_answers\n negative_answers = 1 - positive_answers\n if (positive_answers == 0) or negative_answers == 0:\n entropy_answers = 0\n else:\n entropy_answers = - positive_answers * math.log2(positive_answers) - \\\n negative_answers * math.log2(negative_answers)\n\n # difference between (feature entropy + answers entropy) and (feature + answers) entropy\n if entropy_answers - result < 0:\n print('negative information', num_lemma, entropy_answers - result)\n return - (entropy_answers - result)", "def two_of_three(a, b, c):\n return a ** 2 + b ** 2 + c ** 2 - (min(a, b, c) ** 2)", "def I(x):\n if abs(x-L/2.0) > 0.1:\n return 0\n else:\n return 1", "def hyperbolic_to_poincare(x):\n return (1 / (x[-1] + 1)) * x[:-1]", "def curve_no_hillcoef(ph, pka):\n# return ph - pka\n return 1/(10**(pka-ph)+1)", "def p_funct(self, yi, p, t): \t\t\n\t\t\n\t\tglobal Cg \n\t\tCg = abs(yi[0])\n\t\tglobal Ch \n\t\tCh = abs(yi[1])\n\t\tglobal tin_g \n\t\ttin_g = abs(yi[2])\n\t\tglobal tin_h \n\t\ttin_h = abs(yi[3])\n\t\t\n\t\ty = state_at(t)\n\t\trisk = 1 - (y[-1][0] + y[-1][3]) / 
sum(y[-1])\n\t\t\n\t\treturn abs(risk - p)", "def hg1f2(Mu,Y):\n return float(mpmath.hyp1f2(0.5,2,Mu,-Y**2))", "def topography(x,y):\n \n z = -x/10\n \n N = len(x)\n for i in range(N):\n # Step\n if 10 < x[i] < 12:\n z[i] += 0.4 - 0.05*y[i]\n \n # Constriction\n if 27 < x[i] < 29 and y[i] > 3:\n z[i] += 2\n \n # Pole\n if (x[i] - 34)**2 + (y[i] - 2)**2 < 0.4**2:\n z[i] += 2\n \n return z", "def hw_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n flow_dir = np.sign(i[0])\n\n return ((i[1] - o[1]) * flow_dir -\n (10.67 * abs(i[0]) ** 1.852 * self.L.val /\n (self.ks.val ** 1.852 * self.D.val ** 4.871)) *\n (9.81 * ((v_i + v_o) / 2) ** 0.852))", "def theta():\n pass", "def tlwh(mean):\n ret = mean[:4].copy()\n ret[2] *= ret[3]\n ret[:2] -= ret[2:]/2\n return ret", "def theta_ft(h,ft_intercept,gamma):\n theta_top = ft_intercept + h*gamma\n return theta_top" ]
[ "0.6345466", "0.6224685", "0.62164116", "0.6141258", "0.5938721", "0.57951844", "0.5715218", "0.5696992", "0.56795216", "0.56696415", "0.5644957", "0.5635818", "0.561018", "0.56082743", "0.5606991", "0.56034803", "0.5592059", "0.55717176", "0.55676854", "0.55559623", "0.5550272", "0.5542381", "0.55281615", "0.55194306", "0.5517465", "0.54838336", "0.54826915", "0.5451977", "0.5442291", "0.5426091", "0.5426091", "0.5417501", "0.53996253", "0.5396238", "0.5386839", "0.5383335", "0.537517", "0.5365629", "0.536492", "0.5354958", "0.5350068", "0.53499776", "0.53478795", "0.53470886", "0.5346251", "0.53426427", "0.5337469", "0.5334943", "0.5329416", "0.5329099", "0.53258014", "0.5323664", "0.5318818", "0.5299182", "0.5294535", "0.5285676", "0.526808", "0.5261601", "0.52586097", "0.52574295", "0.5257273", "0.52540874", "0.5252986", "0.52489245", "0.52484155", "0.52450603", "0.5240126", "0.52363527", "0.5236024", "0.52358997", "0.5233663", "0.52312577", "0.52296567", "0.5224636", "0.5223567", "0.52232134", "0.5222485", "0.5218717", "0.5216835", "0.52100277", "0.52058387", "0.5205772", "0.52014023", "0.519856", "0.5196913", "0.51967835", "0.5194381", "0.5192875", "0.5189953", "0.5182173", "0.51815385", "0.518035", "0.517965", "0.51791924", "0.51789623", "0.5176867", "0.5176787", "0.51756716", "0.5171565", "0.5170648", "0.5169758" ]
0.0
-1
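A minimal usage sketch for the compute_determinant helper in the record above; the covariance matrices and printed values below are illustrative assumptions, not part of the dataset, and the snippet assumes NumPy plus the helper definition are in scope.

import numpy as np

# Hypothetical 3x3 covariance matrices, chosen only to exercise both branches.
well_conditioned = np.diag([2.0, 1.0, 0.5])
singular = np.diag([2.0, 1.0, 0.0])  # zero variance on the third feature

print(compute_determinant(well_conditioned))  # 1.0 (= 2.0 * 1.0 * 0.5)
print(compute_determinant(singular))          # nan: a zero determinant flags a singular covariance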
Method to compute the Kullback-Leibler divergence between two Gaussians
def compute_kl(mu1, mu2, sigma1, sigma2):
    k = len(mu1)
    try:
        term1 = np.trace(np.matmul(np.linalg.inv(sigma2), sigma1))
        term2 = np.matmul(np.matmul((mu2 - mu1).T, np.linalg.inv(sigma2)), (mu2 - mu1))
        det_sigma1 = compute_determinant(sigma1)
        det_sigma2 = compute_determinant(sigma2)
        # term3 = np.log(np.linalg.det(sigma2)/np.linalg.det(sigma1))
        term3 = np.log(det_sigma2)
        term4 = np.log(det_sigma1)
        kl = (term1 + term2 - k + term3 - term4)/2.
        return kl
    except np.linalg.LinAlgError:
        return np.nan
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_KL_divergence(self):\n KL_loss_W = Vil.get_KL_divergence_Samples(self.mu_weight, Vil.softplus(self.rho_weight), self.weight, self.prior)\n KL_loss_b = 0\n if self.bias is not None:\n KL_loss_b = Vil.get_KL_divergence_Samples(self.mu_bias, Vil.softplus(self.rho_bias), self.bias, self.prior)\n \n KL_loss = KL_loss_W + KL_loss_b\n \n return KL_loss", "def kl_div_prior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq11 - gradient of prior\n #gradient of the KLD between posterior and prior wrt to prior\n #parameters theta, i.e. generative model parameters.\n #logits to probabilities\n posterior_probs=torch.sigmoid(posterior_logits)\n positive_probs=posterior_probs.detach()\n \n #samples from posterior are labelled positive\n positive_samples=posterior_binary_samples.detach()\n\n n_split=positive_samples.size()[1]//2\n positive_samples_left,positive_samples_right=torch.split(positive_samples,split_size_or_sections=int(n_split),dim=1)\n \n #-z_left^t J z_right\n pos_first_term=torch.matmul(positive_samples_left,self.prior.get_weights())*positive_samples_right\n \n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n \n #this gives [42,400] size\n #- z^t h\n #TODO this uses positive probs. Should it not use positive samples?\n # FIXME an indication are the negative ones where samples are used! On\n #other hand this is the only place this this used\n pos_sec_term=positive_probs*rbm_bias\n # pos_sec_term=positive_samples*rbm_bias\n\n # Energy = -z_left^t J z_right - z^t h\n pos_kld_per_sample=-(torch.sum(pos_first_term,axis=1)+torch.sum(pos_sec_term,axis=1))\n #samples from rbm are labelled negative\n\n #rbm_samples Tensor(\"zeros:0\", shape=(200, 200), dtype=float32)\n #this returns the full RBM set: left and right nodes concatenated\n\n #TODO What are these samples here?\n #TODO what's the impact of doing gibbs sampling here? does this make\n #sense?\n rbm_samples=self.prior.get_samples_kld(approx_post_samples=positive_samples_left,n_gibbs_sampling_steps=1)\n negative_samples=rbm_samples.detach()\n\n # print(self.prior.get_weights())\n n_split=negative_samples.size()[1]//2\n negative_samples_left,negative_samples_right=torch.split(negative_samples,split_size_or_sections=int(n_split),dim=1)\n neg_first_term=torch.matmul(negative_samples_left,self.prior.get_weights())*negative_samples_right\n \n #FIXME see above, the positive case looks different. 
Why?\n neg_sec_term=negative_samples*rbm_bias\n neg_kld_per_sample=(torch.sum(neg_first_term,axis=1)+torch.sum(neg_sec_term,axis=1))\n \n kld_per_sample=pos_kld_per_sample+neg_kld_per_sample\n\n return kld_per_sample", "def KL_divergence(model_1, model_2, samples):\n posterior_1 = create_posterior_object(model_1, samples)\n posterior_2 = create_posterior_object(model_2, samples)\n return posterior_1.KL(posterior_2)", "def kl_divergence(self, samples):\n # Check size of input\n if not len(samples.shape) == 2:\n raise ValueError('Given samples list must be n x 2.')\n if samples.shape[1] != self._n_parameters:\n raise ValueError(\n 'Given samples must have length ' + str(self._n_parameters))\n\n best_mode = np.zeros(samples.shape[0])\n for i in range(samples.shape[0]):\n a_sample = samples[i, :]\n a_log_pdf = -np.inf\n a_max_index = -1\n for j, var in enumerate(self._vars):\n a_test_log_pdf = var.logpdf(a_sample)\n if a_test_log_pdf > a_log_pdf:\n a_log_pdf = a_test_log_pdf\n a_max_index = j\n best_mode[i] = a_max_index\n\n kl = np.zeros(len(self._vars))\n for i in range(len(self._vars)):\n y = np.array(samples[best_mode == i, :], copy=True)\n # when a mode has no points use all samples\n if y.shape[0] == 0:\n y = np.array(samples, copy=True)\n m0 = np.mean(y, axis=0)\n s0 = np.cov(y.T)\n s1 = self._covs[i]\n m1 = self._modes[i]\n s1_inv = np.linalg.inv(s1)\n if len(np.atleast_1d(s0)) > 1:\n kl[i] = 0.5 * (\n np.trace(np.matmul(s1_inv, s0)) +\n np.matmul(np.matmul(m1 - m0, s1_inv), m1 - m0) -\n np.log(np.linalg.det(s0)) +\n np.log(np.linalg.det(s1)) -\n self._n_parameters)\n else:\n kl[i] = 0.5 * (\n np.sum(s1_inv * s0) +\n (m1 - m0) * s1_inv * (m1 - m0) -\n np.log(s0) +\n np.log(s1) -\n 1)\n return kl", "def kl_divergence(self, params_q, params_p):\n means_q = params_q[:, :, 0]\n log_std_q = params_q[:, :, 1]\n\n means_p = params_p[:, :, 0]\n log_std_p = params_p[:, :, 1]\n\n std_q = torch.exp(log_std_q)\n std_p = torch.exp(log_std_p)\n\n kl_div = log_std_p - log_std_q + (std_q ** 2 + (means_q - means_p) ** 2) / (2.0 * std_p ** 2) - 0.5\n\n return kl_div.sum(dim=-1)", "def __init__(self, name='backward_kl_divergence', **kwargs):\n\n super(BackwardKLDivergence, self).__init__(name=name, **kwargs)\n\n return", "def kl_div_posterior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq12\n #gradient of the KLD between posterior and prior wrt to posterior\n #parameters phi\n \n logger.debug(\"kl_div_posterior_gradient\")\n posterior_upper_bound = 0.999*torch.ones_like(posterior_logits)\n #logits to probabilities\n posterior_probs=torch.min(posterior_upper_bound, torch.sigmoid(posterior_logits))\n \n n_split=int(posterior_binary_samples.size()[1]//2)\n #binarised samples from posterior to RBM layers\n rbm_samples_left,rbm_samples_right=torch.split(posterior_binary_samples,split_size_or_sections=n_split,dim=1)\n\n #the following prepares the variables in the calculation in tehir format\n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n rbm_weight=self.prior.get_weights()#self._J\n\n # this is transposed, so we multiply what we call \"right hand\" (\"hidden layer\")\n # samples with right rbm nodes\n # rbm_weight_t=torch.transpose(rbm_weight,0,1)#self._J\n \n rbm_activation_right=torch.matmul(rbm_samples_right,rbm_weight.t())\n rbm_activation_left=torch.matmul(rbm_samples_left,rbm_weight)\n\n #corresponds to samples_times_J\n 
rbm_activation=torch.cat([rbm_activation_right,rbm_activation_left],1)\n \n #TODO what is this scaling factor?\n #[400,400] \n hierarchy_scaling= (1.0 - posterior_binary_samples) / (1.0 - posterior_probs)\n hierarchy_scaling_left,hierarchy_scaling_right=torch.split(hierarchy_scaling, split_size_or_sections=int(n_split),dim=1)\n \n #TODO why does this happen? This seems to scale only the left side of\n #the RBM. Th right side is replaced with ones.\n hierarchy_scaling_with_ones=torch.cat([hierarchy_scaling_left,torch.ones(hierarchy_scaling_right.size())],axis=1)\n \n with torch.no_grad():\n undifferentiated_component=posterior_logits-rbm_bias-rbm_activation*hierarchy_scaling_with_ones\n undifferentiated_component=undifferentiated_component.detach()\n \n kld_per_sample = torch.sum(undifferentiated_component * posterior_probs, dim=1)\n\n return kld_per_sample", "def kl(self):\n weights_logvar = self.weights_logvar\n kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \\\n (weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / (\n 2 * self.prior_stdv.pow(2)) - 0.5\n kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \\\n (self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / (\n 2 * self.prior_bias_stdv.pow(2)) \\\n - 0.5\n return kld_weights.sum() + kld_bias.sum()", "def kl_divergence(self) -> Tensor:\n return self.variational_strategy.kl_divergence().sum(dim=1).mean()", "def kl_divergence(means: Tensor, logvars: Tensor) ->Tensor:\n kl_cost = -0.5 * (logvars - means ** 2 - torch.exp(logvars) + 1.0)\n kl_cost = torch.mean(kl_cost, 0)\n return torch.sum(kl_cost)", "def kl_divergence(self) -> Tensor:\n return torch.tensor(0.0)", "def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)", "def get_KL_divergence(self):\n KL_loss = 0\n if(self.Bayesian):\n for i in range(self.num_layers):\n KL_loss += getattr(self, 'LSTMCell%i'%(i+1)).get_KL_divergence()\n \n return KL_loss", "def EffectSizeKullbackLeibler(weights, means, sigmas):\n kl = 0\n Q = len(weights)\n\n # Normalize weights\n sum_weights = np.sum(weights)\n weights = weights / sum_weights\n\n # Pair each component in the control group to the closest component in the intervention group\n for i in range(Q):\n m1, s1, w1 = means[0,i] , sigmas[0,i], weights[0,i]\n kl_scores = np.zeros((Q))\n for j in range(len(weights)):\n m2, s2, w2 = means[1,j], sigmas[1,j], weights[1,j]\n kl_univariate = KullbackLeiblerUnivariateGaussian(m1,m2,s1,s2)\n kl_scores[j] = kl_univariate + np.log(w1/w2)\n\n kl += w1 * kl_scores.min()\n return kl", "def kl_dist_smoothing(distribution1: np.array, distribution2: np.array, epsilon: float) -> float:\n # Performs smoothing\n distributions = [distribution1, distribution2]\n smoothed_distributions = []\n for distribution in distributions:\n nonzeros = np.count_nonzero(distribution)\n zeros = len(distribution) - nonzeros\n smoothed_distributions.append([epsilon if prob == 0 else prob - zeros * epsilon / nonzeros\n for prob in distribution])\n\n return sum(kl_div(smoothed_distributions[0], smoothed_distributions[1]))", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, 
:]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def kl_divergence(self, post_logits, post_samples, is_training=True):\n logger.debug(\"GumBolt::kl_divergence\")\n \n # Concatenate all hierarchy levels\n logits_q_z = torch.cat(post_logits, 1)\n post_zetas = torch.cat(post_samples, 1)\n \n # Compute cross-entropy b/w post_logits and post_samples\n cross_entropy = - self._bce_loss(logits_q_z, post_zetas)\n cross_entropy = torch.mean(torch.sum(cross_entropy, 1), 0)\n \n # Compute positive energy expval using hierarchical posterior samples\n \n # Number of hidden and visible variables on each side of the RBM\n num_var_rbm = (self.n_latent_hierarchy_lvls * self._latent_dimensions)//2\n \n # Compute positive energy contribution to the KL divergence\n post_zetas_vis, post_zetas_hid = post_zetas[:, :num_var_rbm], post_zetas[:, num_var_rbm:]\n pos_energy = self.energy_exp(post_zetas_vis, post_zetas_hid)\n \n # Compute gradient contribution of the logZ term\n rbm_visible_samples, rbm_hidden_samples = self.sampler.block_gibbs_sampling()\n rbm_vis, rbm_hid = rbm_visible_samples.detach(), rbm_hidden_samples.detach()\n neg_energy = - self.energy_exp(rbm_vis, rbm_hid)\n \n kl_loss = cross_entropy + pos_energy + neg_energy\n return kl_loss, cross_entropy, pos_energy, neg_energy", "def kl_divergence_from_logits(self, logits_a, logits_b):\n distribution1 = tf.contrib.distributions.Categorical(logits=logits_a)\n distribution2 = tf.contrib.distributions.Categorical(logits=logits_b)\n return tf.contrib.distributions.kl_divergence(distribution1, distribution2)", "def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance", "def _Kgradients(self):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlp = self.noise_model.dlogpdf_df(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #Explicit\r\n #expl_a = np.dot(self.Ki_f, self.Ki_f.T)\r\n #expl_b = self.Wi_K_i\r\n #expl = 0.5*expl_a - 0.5*expl_b\r\n #dL_dthetaK_exp = dK_dthetaK(expl, X)\r\n\r\n #Implicit\r\n impl = mdot(dlp, dL_dfhat, I_KW_i)\r\n\r\n #No longer required as we are computing these in the gp already\r\n #otherwise we would take them away and add them back\r\n #dL_dthetaK_imp = dK_dthetaK(impl, X)\r\n #dL_dthetaK = dL_dthetaK_exp + dL_dthetaK_imp\r\n #dL_dK = expl + impl\r\n\r\n #No need to compute explicit as we are computing dZ_dK to account\r\n #for the difference between the K gradients of a normal GP,\r\n #and the K gradients including the implicit part\r\n dL_dK = impl\r\n return dL_dK", "def compute_gradient(Y, mi, latent_Sigmas, B1, B2, ss, mu, g1, g2, sigma2, index):\n\n # these are the 's' parameters when nu=e_q, beta=0\n si = -mi[-1]*ss[index][-1]\n Sigma = latent_Sigmas[index]\n yi = Y[index]\n\n B1_plus_B2 = B1 + B2\n\n b0 = -mi.reshape(-1, 1) + 1/(2*sigma2)*np.matmul(B1_plus_B2.T, (yi-mu).T)\n\n b1 = (SQRT_PI_OVER_2-1-si**2)*np.exp(-si**2/2)*g1*np.trace(\n np.matmul(\n np.matmul(\n B1.T, B1\n ),\n Sigma\n )\n )\n\n b2 = si*np.exp(-si**2/2)*g2*np.trace(\n np.matmul(\n np.matmul(\n B2.T,\n 
B2\n ),\n Sigma\n )\n )\n\n B1TB1 = np.matmul(B1.T, B1)\n B2TB2 = np.matmul(B2.T, B2)\n\n b3 = np.exp(-si**2/2)*np.matmul(\n mi.T,\n np.matmul(\n B2TB2 - B1TB1,\n mi\n )\n )\n\n b4 = ROOT2PI*np.matmul(\n mi,\n erfc(si/ROOT2)*B1TB1 + (erf(si/ROOT2)+1)*B2TB2\n ).reshape(-1, 1)\n\n # qth_terms = [-var[-1]*eq*(item1 + item2 + item3) for var, item1, item2, item3 in zip(latent_variances, b1, b2, b3)]\n\n # return sum([outer_term + (TWOPI)**(1/2-q)/(2*sigma2)*inner_term for outer_term, inner_term in zip(b0, inner_terms)])\n\n result = b0 + b4\n\n # update the qth element with the corresponding derivative elements\n result[-1] += (TWOPI)**(1/2-q)/(2*sigma2)*(b1 + b2 + b3)\n\n return result.flatten()", "def _graph_fn_kl_divergence(distribution_a, distribution_b):\n if get_backend() == \"tf\":\n return tf.no_op()\n # TODO: never tested. tf throws error: NotImplementedError: No KL(distribution_a || distribution_b) registered for distribution_a type Bernoulli and distribution_b type ndarray\n #return tf.distributions.kl_divergence(\n # distribution_a=distribution_a,\n # distribution_b=distribution_b,\n # allow_nan_stats=True,\n # name=None\n #)", "def kl_gaussian_gaussian_analytic(mu_q, logvar_q, mu_p, logvar_p):\n # init\n batch_size = mu_q.size(0)\n input_size = mu_q.size(1)\n mu_q = mu_q.view(batch_size, -1)\n logvar_q = logvar_q.view(batch_size, -1)\n mu_p = mu_p.view(batch_size, -1)\n logvar_p = logvar_p.view(batch_size, -1) \n\n # kld\n cov_q = torch.exp(logvar_q)\n cov_p = torch.exp(logvar_p)\n cov_p_inverse = 1 / cov_p\n mu_diff = mu_p - mu_q\n log_det_cov_p = torch.sum(logvar_p, dim=1)\n log_det_cov_q = torch.sum(logvar_q, dim=1)\n trace_det = torch.sum(cov_p_inverse * cov_q, dim=1)\n fourth_term = torch.sum(mu_diff * cov_p_inverse * mu_diff, dim=1)\n kl_div = 0.5 * (log_det_cov_p - log_det_cov_q - input_size + trace_det + fourth_term)\n return kl_div", "def zkl_divergence(x, y, gamma):\n return np.sum([p_i*np.log(p_i/q_i) if q_i > 0 and p_i > 0 else p_i*gamma for (p_i, q_i) in zip(x, y)])", "def method_2d(knots,y_n,num):\n cv_iter = 10 # number of iteration for cross-validation \n GSV = np.zeros((cv_iter,cv_iter))\n# tr = np.zeros((cv_iter,cv_iter))\n# fun =np.zeros((cv_iter,cv_iter))\n lam_x = np.linspace(0,0.2,cv_iter)\n lam_y = np.linspace(0,0.2,cv_iter)\n num_knots = len(knots)\n linear_knots = knots[1:num_knots-1]\n num_knots = num_knots-4\n znam = np.zeros((num_knots))\n basis = np.zeros((num,num_knots))\n basis_1 = np.zeros((num,num_knots))\n basis_deriative = np.zeros((num,num_knots))\n basis_deriative_1 = np.zeros((num,num_knots))\n S = np.zeros((num_knots,num_knots,num))\n vs = BsplineVectorSpace(2, knots)\n vs_1 = BsplineVectorSpace(1, linear_knots)\n I_i = np.eye(num_knots)\n for i in xrange(0,num_knots):\n basis[:,i] = vs.basis_der(i,0)(np.linspace(0,1,num))\n basis_deriative[:,i] = vs.basis_der(i,1)(np.linspace(0,1,num))/num\n basis_1[:,i] = vs_1.basis_der(i,0)(np.linspace(0,1,num))\n basis_deriative_1[:,i] = vs_1.basis_der(i,1)(np.linspace(0,1,num))/num\n B = abs(basis_deriative-basis_1)\n S = np.zeros((num_knots,num_knots,num))\n k = np.zeros((num_knots,num_knots,num))\n for i in xrange(num_knots):\n for j in xrange(num_knots):\n S[i,j,:] = B[:,i]*B[:,j]\n k[i,j,:] =basis_deriative_1[:,i] * basis_deriative_1[:,j]\n S_int = np.zeros((num_knots,num_knots))\n k_int = np.zeros((num_knots,num_knots))\n for i in xrange(num_knots):\n for j in xrange(num_knots):\n S_int[i,j] = integrate.trapz(S[i,j,:])\n k_int[i,j] = integrate.trapz(k[i,j,:])\n basis_product = np.kron(basis,basis)\n 
S_x = np.kron(S_int,I_i)\n S_y = np.kron(I_i,S_int)\n K_x = np.kron(k_int,I_i)\n K_y = np.kron(I_i,k_int)\n for i in xrange(cv_iter):\n for j in xrange(cv_iter):\n influence_matrix = np.dot(np.dot(basis_product,(np.linalg.inv(np.dot(np.transpose(\n basis_product),basis_product)+lam_x[i]*S_x+lam_y[j]*S_y+lam_x[i]* K_x+lam_y[j]*K_y))),np.transpose(basis_product))\n for k in xrange(num_knots):\n znam[k] =(1-influence_matrix[k,k])**2\n tr = np.sum(znam)\n fun = np.sum((y_n-np.dot(influence_matrix,y_n))**2)\n GSV[i,j] =fun/(num*tr)\n print i,j\n a,b = np.unravel_index(GSV.argmin(), GSV.shape)\n# a = np.argmin(np.argmin(GSV,axis = 0))\n# b = np.argmin(np.argmin(GSV,axis = 1))\n lamb_x = lam_x[a]\n lamb_y = lam_y[b]\n print lamb_x,lamb_y\n model_fit = np.dot(np.dot(np.dot(basis_product,(np.linalg.inv(np.dot(np.transpose(\n basis_product),basis_product)+lamb_x*S_x+lamb_y*S_y+lamb_x* K_x+lamb_y*K_y))),np.transpose(basis_product)),y_n)\n return model_fit,GSV", "def MyKLD(X,Y): \n mu1,mu2 = tuple(np.mean(X,axis=0))\n sigma1,sigma2 = tuple(np.std(X,axis=0))\n m1,m2 = tuple(np.mean(X,axis=0))\n s1,s2 = tuple(np.std(X,axis=0))\n rho = np.corrcoef(X,rowvar=False)[0,1]\n r = np.corrcoef(Y,rowvar=False)[0,1]\n \n return (\n ((mu1-m1)**2/s1**2 - 2*r*(mu1-m1)*(mu2-m2)/(s1*s2) + (mu2-m2)**2/s2**2) /\n (2 * (1 - r**2)) +\n ((sigma1**2-s1**2)/s1**2 - 2*r*(rho*sigma1*sigma2-r*s1*s2)/(s1*s2) + \n (sigma2**2-s2**2)/s2**2) /\n (2 * (1 - r**2)) +\n np.log((s1**2 * s2**2 * (1-r**2)) / (sigma1**2 * sigma2**2 * (1-rho**2))) / 2\n )", "def kl_divergence(a, b, normalize=True):\n a, b = np.array(a), np.array(b)\n\n x = np.linspace(\n min(a.min(), b.min()) - 1,\n max(a.max(), b.max()) + 1,\n 100\n )\n\n p = gaussian_kde(a)(x)\n q = gaussian_kde(b)(x)\n\n if normalize:\n p = p/np.sum(p)\n q = q/np.sum(q)\n\n return np.sum(np.where(p != 0, (p) * np.log(p / q), 0))", "def _kld_gauss(self, mean_1, std_1, mean_2, std_2):\n kld_element = (2 * torch.log(std_2) - 2 * torch.log(std_1) + (std_1.pow(2) + (mean_1 - mean_2).pow(2)) / std_2.pow(2) - 1)\n return\t0.5 * torch.sum(kld_element)", "def JSDivergence(p_output, q_output, get_softmax=True, dim=1):\n if get_softmax:\n p_output = F.softmax(p_output, dim=dim)\n # q_output = F.softmax(q_output, dim=dim)\n log_mean_output = ((p_output + q_output )/2).log()\n return (\n F.kl_div(log_mean_output, p_output, reduction='batchmean') + \n F.kl_div(log_mean_output, q_output, reduction='batchmean')\n ) / 2", "def rbf_kernel(x_1, x_2, l):\n\n\tassert l > 0, \"The hyperparameter l must be > 0\"\n\tdist = euclidean_distances(x_1.reshape(-1,1), x_2.reshape(-1,1))\n\treturn np.exp(dist**2 / -(2*l**2))", "def kl_gauss(x, y, sig2=1.):\n return (x - y) ** 2 / (2 * sig2)", "def kl_bern(x, y):\n x = min(max(x, eps), 1-eps)\n y = min(max(y, eps), 1-eps)\n return x*log(x/y) + (1-x)*log((1-x)/(1-y))", "def kl_divergence(self, logits_q, logits_p):\n return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True)", "def kl_divergence(p_dist, q_dist, n_samples_per_axis=30, n_axis=2):\r\n global COUNTER\r\n if n_axis == 2:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y)\r\n elif n_axis == 3:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n z = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y, z)\r\n elif n_axis == 1:\r\n grids = np.linspace(-1.1, 1.1, 120)\r\n print(\"Grid complete!\")\r\n if n_axis != 1:\r\n grid = 
np.vstack(grids).reshape((n_axis, n_samples_per_axis**n_axis)).T\r\n else:\r\n grid = grids\r\n grid = np.reshape(grid, (grid.shape[0], 1))\r\n probs_p = np.exp(p_dist.score_samples(grid))\r\n probs_q = np.exp(q_dist.score_samples(grid))\r\n print(\"prob_calc_complete\")\r\n kl = entropy(probs_p, probs_q)\r\n return kl", "def test_kl_divergence(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n kl = kl_divergence(dist_a, dist_b)\n if i == j:\n assert pytest.approx(kl, 0.0001) == 0.0\n else:\n assert kl > 0", "def find_knee(x,y):\n\n # find ranges\n if len(x) != len(y):\n raise Exception(\"bad data\")\n tot_len = len(x)\n \n \n \n # fit strait lines to both\n\n # find intercept\n knee_r = (f_top.beta[1] - f_bottom.beta[1])/(-f_top.beta[0] + f_bottom.beta[0])", "def get_ELBO(self, length):\n # KL divergence between posterior and N(0,1) prior\n\n KL_div = (0.5 * (tf.reduce_sum(tf.sqrt(tf.reduce_sum(self.batch_mu**2,\n axis=1))) + tf.reduce_sum(self.batch_Tr_C) -\n tf.reduce_sum(self.batch_ld_C) - length))\n weight_reg = ((0.5 / self.k) *\n tf.sqrt(tf.reduce_sum(self.W**2)) *\n tf.sqrt(tf.reduce_sum(self.G**2)))\n return -(weight_reg + KL_div)", "def _gradients(self, partial):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = self.noise_model._laplace_gradients(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #len(dlik_dthetaL)\r\n num_params = len(self._get_param_names())\r\n # make space for one derivative for each likelihood parameter\r\n dL_dthetaL = np.zeros(num_params)\r\n for thetaL_i in range(num_params):\r\n #Explicit\r\n dL_dthetaL_exp = ( np.sum(dlik_dthetaL[:, thetaL_i])\r\n #- 0.5*np.trace(mdot(self.Ki_W_i, (self.K, np.diagflat(dlik_hess_dthetaL[thetaL_i]))))\r\n + np.dot(0.5*np.diag(self.Ki_W_i)[:,None].T, dlik_hess_dthetaL[:, thetaL_i])\r\n )\r\n\r\n #Implicit\r\n dfhat_dthetaL = mdot(I_KW_i, self.K, dlik_grad_dthetaL[:, thetaL_i])\r\n dL_dthetaL_imp = np.dot(dL_dfhat, dfhat_dthetaL)\r\n dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp\r\n\r\n return dL_dthetaL", "def kl_divergence_bern_bern(z_pres_logits, prior_pres_prob, eps=1e-15):\n z_pres_probs = torch.sigmoid(z_pres_logits)\n kl = z_pres_probs * (torch.log(z_pres_probs + eps) - torch.log(prior_pres_prob + eps)) + \\\n (1 - z_pres_probs) * (torch.log(1 - z_pres_probs + eps) - torch.log(1 - prior_pres_prob + eps))\n\n return kl", "def evaluate(self, g1, g2):\n \n # activation contribution using gaussian kernel\n K_d = (self.d_kernel.evaluate(np.mat(g1.D).T.A, np.mat(g2.D).T.A)).flatten()\n K_d = (np.mat(K_d).T * np.mat(K_d)).A\n \n # geometrical contribution using gaussian kernel\n K_x = (self.x_kernel.evaluate(g1.X, g2.X)).flatten()\n K_x = (np.mat(K_x).T * np.mat(K_x)).A\n \n # structural contribution using linear kernel on binary values\n direct_adjacency = np.kron(g1.A, g2.A)\n K_s = direct_adjacency\n \n # with two of the three components\n K_xd = K_x * K_s\n #print K_fg\n K_sd = K_s * K_d\n K_sx = K_s * K_x\n # the full kernel with the three components\n K_sxd = K_s * K_x * K_d\n \n # sum all terms of these babies to compute the kernel values\n K_list = [K_sxd.sum(), K_xd.sum(), K_sd.sum(), K_sx.sum(), K_s.sum(), \n K_x.sum(), K_d.sum()]\n \n if self.subkernels:\n # return the full K_sga kernel and other kernels which do not use\n # all three types of features\n return np.array(K_list)\n else:\n # return only the full K_sga kernel \n return K_sxd.sum()", "def 
kl_divergence(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = backend.clip(y_true, backend.epsilon(), 1)\n y_pred = backend.clip(y_pred, backend.epsilon(), 1)\n return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)", "def KL_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = prob_density_func(ys,norm=True,data_range=data_range)\n keys = set(pdf_x.keys()+pdf_y.keys())\n PQ = []\n for k in keys:\n if k in pdf_x and k in pdf_y:\n PQ.append((pdf_x[k],pdf_y[k]))\n return np.sum([p*np.log(float(p)/float(q)) for (p,q) in PQ if q>0 and p>0])", "def _kl_divergence(p, p_logits, q):\n for tensor in [p, p_logits, q]:\n if not tensor.dtype.is_floating:\n raise ValueError('Input %s must be floating type.', tensor.name)\n p.shape.assert_has_rank(2)\n p_logits.shape.assert_has_rank(2)\n q.shape.assert_has_rank(1)\n return math_ops.reduce_sum(\n p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)", "def get_Kl_divergence(model1, model2, collection, lam, missing_val = 0.0001):\n smoot_m2 = {key: (1-lam)*model2.get(key, 0) + lam*collection.get(key, missing_val) for key in model1}\n\n divergence = sum([model1[key]*math.log(model1[key]/smoot_m2[key]) for key in model1])\n return divergence", "def kl_divergence(mu, logvar):\n klds = -0.5*(1 + logvar - mu.pow(2) - logvar.exp())\n total_kld = klds.sum(1).mean(0, True)\n dimension_wise_kld = klds.mean(0)\n mean_kld = klds.mean(1).mean(0, True)\n\n return total_kld, dimension_wise_kld, mean_kld", "def kl_divergence(self):\n return self._kl_divergence_func", "def kl_divergence(eta_q, eta_p):\n return ev_t(eta_q) @ (eta_q - eta_p) - a(eta_q) + a(eta_p)", "def _cal_score_kl_divergence(self, h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma):\n comp_sigma = h_sigma + r_sigma\n comp_mu = h_mu + r_mu\n trace_fac = (comp_sigma / t_sigma).sum(-1)\n mul_fac = ((t_mu - comp_mu) ** 2 / t_sigma).sum(-1)\n det_fac = (torch.log(t_sigma) - torch.log(comp_sigma)).sum(-1)\n return trace_fac + mul_fac + det_fac - self.hidden_size", "def build_reparam_kl_loss_and_gradients(inference, var_list):\n \n p_log_lik = [0.0] * inference.n_samples\n base_scope = tf.get_default_graph().unique_name(\"inference\") + '/'\n for s in range(inference.n_samples):\n # Form dictionary in order to replace conditioning on prior or\n # observed variable with conditioning on a specific value.\n scope = base_scope + tf.get_default_graph().unique_name(\"sample\")\n dict_swap = {}\n for x, qx in six.iteritems(inference.data):\n if isinstance(x, RandomVariable):\n if isinstance(qx, RandomVariable):\n qx_copy = copy(qx, scope=scope)\n dict_swap[x] = qx_copy.value()\n else:\n dict_swap[x] = qx\n\n for z, qz in six.iteritems(inference.latent_vars):\n # Copy q(z) to obtain new set of posterior samples.\n qz_copy = copy(qz, scope=scope)\n dict_swap[z] = qz_copy.value()\n\n for x in six.iterkeys(inference.data):\n if isinstance(x, RandomVariable):\n x_copy = copy(x, dict_swap, scope=scope)\n p_log_lik[s] += tf.reduce_sum(\n inference.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))\n\n p_log_lik = tf.reduce_mean(p_log_lik)\n\n kl_penalty = tf.reduce_sum([\n tf.reduce_sum(inference.kl_scaling.get(z, 1.0) * kl_divergence(qz, z))\n for z, qz in six.iteritems(inference.latent_vars)])\n\n if 
inference.logging:\n tf.summary.scalar(\"loss/p_log_lik\", p_log_lik,\n collections=[inference._summary_key])\n tf.summary.scalar(\"loss/kl_penalty\", kl_penalty,\n collections=[inference._summary_key])\n\n loss = -(p_log_lik - kl_penalty)\n grads = tf.gradients(loss, var_list)\n grads_and_vars = list(zip(grads, var_list))\n return loss, grads_and_vars", "def learn(self, Xtrain, ytrain):\n pass\n self.weights = np.zeros(Xtrain.shape[1],)\n\n ### YOUR CODE HERE\n \n lmbd = self.params['lamb']\n \n numsamples = Xtrain.shape[0]\n # Xless = Xtrain[:,self.params['features']]\n Xless = Xtrain\n self.weights = np.random.rand(Xless.shape[1])\n err = 10000;\n #cw =0;\n tolerance = 10*np.exp(-4)\n i=0;\n \n \n w1 = self.weights\n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](self.weights)\n # print(cw)\n errors = []\n runtm = []\n epch = []\n \n err = 1\n iteration= 1000\n #tm= time.time()\n while (abs(cw-err)>tolerance) and (i <iteration):\n err = cw\n g = self.logit_cost_grad(cw_v, Xless, ytrain)\n obj = cw\n j=0\n ita = -1* self.params['stepsize']\n w = self.weights\n # w1 = np.add(w,np.dot(ita,g))\n while(j<iteration):\n w1 = np.add(w,np.dot(ita,g))\n # cw_v =(np.dot(Xless, w1)-ytrain)\n # cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, w1.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)+lmbd * self.regularizer[0](w1)\n ## print (cw)\n \n if(cw<np.absolute(obj-tolerance)): ############################################\n break\n ita = 0.7*ita\n j=j+1\n \n if(j==iteration):\n self.weights=w\n ita =0\n else:\n self.weights = w1\n \n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)\n #tm1 = time.time()-tm\n #runtm.append(tm1)\n #err = cw\n errors.append(err)\n i=i+1\n epch.append(i)", "def matern_kernel(x_1, x_2, l, v):\n\n\tassert l > 0 and v > 0, \"The hyperparameters l and v must be > 0\"\n\tdist = euclidean_distances(x_1.reshape(-1,1), x_2.reshape(-1,1))\n\tdist[dist == 0.0] += 1e-10\n\tz = np.sqrt(2*v) * dist / l\n\treturn (2**(1-v)/gamma(v)) * (z**v) * kv(v, z)", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def _log_bessel_kve_bwd(aux, g):\n v, z = aux\n dtype = dtype_util.common_dtype([v, z], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n\n log_kve = _log_bessel_kve_custom_gradient(v, z)\n grad_z = tfp_math.log_add_exp(\n _log_bessel_kve_custom_gradient(v - 1., z),\n _log_bessel_kve_custom_gradient(v + 1., z)) - numpy_dtype(\n np.log(2.)) - log_kve\n grad_z = g * -tf.math.expm1(grad_z)\n _, grad_z = tfp_math.fix_gradient_for_broadcasting(\n [v, z], [tf.ones_like(grad_z), grad_z])\n\n # No gradient for v at the moment. This is a complicated expression\n # The gradient with respect to the parameter doesn't have an easy closed\n # form. 
More work will need to be done to ensure good numerics for the\n # gradient.\n # TODO(b/169357627): Implement gradients of modified bessel functions with\n # respect to parameters.\n\n return None, grad_z", "def divergence(w0, lambda0, M2=1):\n return 2*w0/z_rayleigh(w0, lambda0, M2)", "def kullbackLeibler(mu, log_sigma):\n # (tf.Tensor, tf.Tensor) -> tf.Tensor\n # = -0.5 * (1 + log(sigma**2) - mu**2 - sigma**2)\n return -0.5 * tf.reduce_sum(1 + 2 * log_sigma - mu**2 - tf.exp(2 * log_sigma), 1)", "def grad(self, K, y, ak):\n Ka = K.dot(ak) # precompute\n z = y * Ka # decision value for each observation\n grad = (-1*K[z < 1].T.dot(y[z < 1])) / y.size # gradient of hinge\n l2 = (2 * self.lambda_ * Ka) # gradient of l2\n # Don't regularize offset dimension\n grad[:self.offset(ak)] = grad[:self.offset(ak)] + l2[:self.offset(ak)]\n # Gradient normalized by the num obs\n return grad", "def gaussian_kl_np(mu0, log_std0, mu1, log_std1):\n var0, var1 = np.exp(2 * log_std0), np.exp(2 * log_std1)\n pre_sum = 0.5*(((mu1- mu0)**2 + var0)/(var1+EPS) - 1) + log_std1 - log_std0\n all_kls = pre_sum\n #all_kls = np.mean(all_kls)\n all_kls = np.clip(all_kls, 0, 1/EPS) ### for stability\n return all_kls", "def calculate_convergence(v1, v2):\r\n\r\n return norm(v2 - v1, ord=1)", "def gauss_seidel(A, b, tol=1e-8, maxiters=100, plot=False):\n A=np.array(A)*1.0\n b=np.array(b)*1.0 \n m,n=A.shape\n e=[]\n xk=np.zeros((m,))\n \n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m):\n xj[i]=(b[i]-(np.dot(A[i],xi)-A[i,i]*xi[i]))/A[i,i]\n xi[i]=xj[i]\n return xj\n\n if plot==True: \n for i in xrange(1,maxiters+1):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n #print i-1,e[i-1],xk\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n break\n #How many iterations happened\n iters=len(e) #1..len(e)\n dom=np.arange(0,iters)\n \n plt.semilogy(dom,e,'b.-',basey=10,lw=2, ms=2)\n plt.xlabel(\"Iteration #\")\n plt.ylabel(\"Absolute Error of Approximation\")\n #plt.legend(loc=\"upper left\")\n plt.title(\"Convergence of Gauss-Seidel Method\", fontsize=18)\n plt.show()\n return xk\n \n else:\n for i in xrange(1,maxiters+1):\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n return xk", "def backward_G(self):\n mask = self.mask*0.5 + 0.5\n\n self.loss_G_SH = self.criterionS(self.pr_SH*mask, self.gt_SH*mask) * self.opt.lambda_S\n self.loss_G = self.loss_G_SH\n\n if not self.opt.no_brightness:\n self.loss_G_BA = self.criterionBA(self.pr_BA*mask, self.gt_BA*mask) * self.opt.lambda_BA\n self.loss_G_BC = 0\n for i in range(25):\n gt_BC = self.gt_BC[i][:, :2]\n bc_num = int(self.gt_BC[i][0, 3].item())\n pr_BC = self.pr_BC[i]\n loss_G_BC = util.min_loss_BC_NoBatch(pr_BC, gt_BC, bc_num, self.criterionBC)\n loss_G_BC = loss_G_BC * self.opt.lambda_BC / 25.0\n self.loss_G_BC += loss_G_BC\n\n loss_B = self.loss_G_BA + self.loss_G_BC\n self.loss_G += loss_B\n\n # Third, LTM Regularization\n if self.opt.reg_LTM:\n ltm_mean = torch.mean(self.ltm, dim=0, keepdim=True) # [1, 75, 256, 256]\n ltm_mean = ltm_mean.expand(self.ltm.size(0), ltm_mean.size(1), ltm_mean.size(2), ltm_mean.size(3)) # [25, 75, 256, 256]\n self.loss_LTMReg = self.criterionReg(self.ltm, ltm_mean) * self.opt.lambda_regLTM\n self.loss_G += self.loss_LTMReg\n\n\n self.loss_G.backward()", "def kullback_leibler_divergence(p, q):\n null = 1e-10\n return sum(p.get(key, null) * math.log(p.get(key, null) / q.get(key, null))\n for key in set(chain(p.keys(), q.keys())))", "def 
sinkhorn_log(mu,nu,c,epsilon, \n options={'niter':1000, 'tau':-0.5, 'rho':np.inf}):\n\n for key,val in zip(['tau','rho','niter'],[-.5,np.inf,500]):\n options.setdefault(key, val)\n rho,tau,niter = options['rho'],options['tau'],options['niter']\n\n lam = rho/(rho+epsilon)\n if rho==np.inf:\n lam=1.0\n\n H1 = np.ones_like(mu)\n H2 = np.ones_like(nu)\n\n ave = lambda tau, u, u1: tau*u+(1-tau)*u1\n\n lse = lambda A: np.log(np.sum(np.exp(A),axis=1))\n M = lambda u,v:(-c+u[:,np.newaxis]@H2[np.newaxis,:] + H1[:,np.newaxis]@v[np.newaxis,:] )/epsilon\n\n # kullback divergence\n H = lambda p: -np.sum( p.flatten()*(np.log(p.flatten()+1e-20)-1) )\n KL = lambda h,p: np.sum( h.flatten()* np.log( h.flatten()/p.flatten() ) - h.flatten()+p.flatten())\n KLd = lambda u,p: np.sum( p.flatten()*( np.exp(-u.flatten()) -1) )\n dotp = lambda x,y: np.sum(x*y); \n\n err,Wprimal,Wdual = [],[],[]\n u = np.zeros_like(mu)\n v = np.zeros_like(nu)\n\n for _ in range(niter):\n u1=u\n u = ave(tau, u, lam*epsilon*np.log(mu) - lam*epsilon*lse( M(u,v) ) + lam*u )\n v = ave(tau, v, lam*epsilon*np.log(nu) - lam*epsilon*lse( M(u,v).T) + lam*v )\n gamma = np.exp(M(u,v))\n\n if rho==np.inf: \n Wprimal.append(dotp(c,gamma) - epsilon*H(gamma))\n Wdual.append( dotp(u,mu) + dotp(v,nu) - epsilon*np.sum(gamma) )\n err.append( np.linalg.norm( np.sum(gamma,axis=1)-mu ) )\n else:\n Wprimal.append( dotp(c,gamma) - epsilon*H(gamma) \\\n + rho*KL(np.sum(gamma,axis=1),mu) \\\n + rho*KL(np.sum(gamma,axis=0),nu) )\n\n Wdual.append( -rho*KLd(u/rho,mu) - rho*KLd(v/rho,nu) \\\n - epsilon*np.sum( gamma))\n err.append(np.linalg.norm(u-u1, ord=1) )\n \n WDistance = Wprimal[-1]+epsilon*H(gamma)\n\n return gamma,Wprimal,Wdual,err,WDistance", "def _bessel_kve_bwd(aux, g):\n v, z = aux\n kve = _bessel_kve_custom_gradient(v, z)\n grad_z = g * ((z - v) / z * kve - _bessel_kve_custom_gradient(v - 1., z))\n _, grad_z = tfp_math.fix_gradient_for_broadcasting(\n [v, z], [tf.ones_like(grad_z), grad_z])\n\n # No gradient for v at the moment. This is a complicated expression\n # The gradient with respect to the parameter doesn't have an easy closed\n # form. 
More work will need to be done to ensure good numerics for the\n # gradient.\n # TODO(b/169357627): Implement gradients of modified bessel functions with\n # respect to parameters.\n\n return None, grad_z", "def maxkl_strategy(self):\n # TODO: rewrite to update only distribution from sampled bucket\n # Instead of computing everything again every iteration\n\n # Label model distributions\n lm_posteriors = self.bucket_probs.clip(1e-5, 1-1e-5)\n\n # Sample distributions\n # D_KL(LM distribution||Sample distribution)\n rel_entropy = np.zeros(len(lm_posteriors))\n sample_posteriors = np.zeros(lm_posteriors.shape)\n\n # Iterate over buckets\n for i in range(len(lm_posteriors)):\n # Collect points in bucket\n bucket_items = self.ground_truth_labels[np.where(self.unique_inverse == i)[0]]\n # Collect labeled points in bucket\n bucket_gt = list(bucket_items[bucket_items != -1])\n # Add initial labeled point\n if not bucket_gt:\n bucket_gt.append(\n int(np.round(\n self.probs[\"bucket_labels_train\"][0][i].clip(0, 1)\n )))\n bucket_gt = np.array(bucket_gt)\n\n # Bucket distribution, clip to avoid D_KL undefined\n eps = 1e-2 / (len(bucket_gt))\n sample_posteriors[i, 1] = bucket_gt.mean().clip(eps, 1 - eps)\n sample_posteriors[i, 0] = 1 - sample_posteriors[i, 1]\n\n # KL divergence\n rel_entropy[i] = entropy(lm_posteriors[i, :], sample_posteriors[i, :])\n self.bucket_values = rel_entropy\n\n # Select buckets with highest KL divergence\n return np.where(\n np.logical_and(\n rel_entropy == np.max(rel_entropy[self.is_valid_bucket]), self.is_valid_bucket\n )\n )[0]", "def _log_likelihood_gradient(self, z, K, H, B, Kinv):\n\n nparams = 4\n grad = np.zeros((nparams,))\n\n #t0 = time.time()\n tmp = np.dot(self.invc, self.HKinv)\n #t1 = time.time()\n K_HBH_inv = Kinv - np.dot(tmp.T, tmp)\n #t2 = time.time()\n alpha_z = np.dot(K_HBH_inv, z)\n #t3 = time.time()\n\n #print \"gradient: %f %f %f\" % (t1-t0, t2-t1, t3-t2)\n\n for i in range(nparams):\n tA = time.time()\n if (i == 0):\n dKdi = np.eye(self.n)\n else:\n dKdi = self.predict_tree.kernel_deriv_wrt_i(self.X, self.X, \"se\", self.wfn_params, i-1)\n\n dlldi = .5 * np.dot(alpha_z.T, np.dot(dKdi, alpha_z))\n tB = time.time()\n # here we use the fact:\n # trace(AB) = sum_{ij} A_ij * B_ij\n dlldi -= .5 * np.sum(np.sum(K_HBH_inv.T * dKdi))\n\n grad[i] = dlldi\n tC = time.time()\n print \" %d: %f %f\" % (i, tB-tA, tC-tB)\n\n return grad", "def KL_function_gaussian(self, H, data):\n\t\t#Initialize stuff\n\t\tn_data = float(len(data))\n\t\tn_dim = float(len(H))\n\t\t\n\t\t#Calculate the KL function by iterating\n\t\tKL_func = 0.\n\t\tfor i in xrange(int(n_data)):\n\t\t\tKL_func += np.log10( -0.9999999999999999 + np.sum( np.exp(-0.5 * np.sum( ((data[i,:] - data[:,:])/H[:])**2., axis=-1)), axis=-1) )\n\t\tKL_func /= n_data\n\t\tKL_func -= np.log10( (n_data-1.) * np.sqrt( (2.*np.pi)**n_dim * np.prod(H[:])**2.) 
)\n\n\t\treturn KL_func", "def LBFGS(fun, x, args=(), jac=None, x_old=None, maxcor=5, gtol = None, g2tol=1e-10, maxiter=10000,\n maxls=20, store_iterates=\"iterate\", printdb=donothing, linesearch_options={}):\n\n\n\n if x_old is None:\n x_old = x.copy()\n\n x,grad,x_old,grad_old = steepest_descent_wolfe2(x_old, fun, jac,**linesearch_options)\n\n k=1\n\n n = x.size # Dimension of x\n\n gamma = 1\n\n S = np.zeros((n, 0))\n Y = np.zeros((n, 0))\n R = np.zeros((0, 0))\n STgrad = np.array((1, maxcor))\n YTgrad = np.array((1, maxcor))\n\n grad = np.asarray(jac(x))\n grad2 = np.sum(grad**2)\n grad_old = np.asarray(jac(x_old))\n\n alpha=0\n\n # Start loop\n\n iterates = list()\n if store_iterates == 'iterate':\n iterate = scipy.optimize.OptimizeResult(\n {'x': x_old.copy(),\n 'fun': fun(x_old),\n 'jac': grad_old.copy()})\n iterates.append(iterate)\n\n iterate = scipy.optimize.OptimizeResult(\n {'x': x.copy(),\n 'fun': fun(x),\n 'jac': grad})\n iterates.append(iterate)\n\n\n while True:\n #printdb(k)\n #printdb(\"grads\")\n #printdb(grad)\n #printdb(grad_old)\n\n # Update Sk,Yk\n if k > maxcor:\n S = np.roll(S, -1)\n S[:, -1] = (x - x_old).flat\n Y = np.roll(Y, -1)\n Y[:, -1] = (grad - grad_old).flat\n\n else:\n S = np.hstack([S, x - x_old])\n Y = np.hstack([Y, grad - grad_old])\n #printdb(\"S: {}\".format(S))\n #printdb(\"Y: {}\".format(Y))\n\n # 2.\n grad2prev = grad2.copy()\n grad2 = np.sum(grad ** 2) # ok\n\n # check if job is done\n if ((grad2 < g2tol if g2tol is not None else True) and\n (np.max(np.abs(grad)) < gtol if gtol is not None else True)):\n result = scipy.optimize.OptimizeResult({'success': True,\n 'x': x,\n 'nit': k,\n 'iterates': iterates})\n\n #if iterates:\n # result['iterates'] = iterates\n return result\n\n if k > maxiter:\n result = scipy.optimize.OptimizeResult({'success': False,\n 'x': x,\n 'nit': k,\n 'iterates':iterates})\n\n #if iterates:\n # result['iterates'] = iterates\n return result\n\n STgrad_prev = STgrad.copy()\n YTgrad_prev = YTgrad.copy()\n\n STgrad = np.dot(S.T, grad)\n YTgrad = np.dot(Y.T, grad) # OK, this is new\n\n #printdb(\"STgrad : {}\".format(STgrad))\n #printdb(\"YTgrad: {}\".format(YTgrad))\n\n if k > maxcor:\n w = np.vstack([STgrad_prev, gamma * YTgrad_prev])\n S_now_T_grad_prev = np.roll(STgrad_prev,-1)\n S_now_T_grad_prev[-1] = - alpha * gamma * grad2prev - alpha * w.T.dot(p)\n else : # straightforward Version\n S_now_T_grad_prev = np.dot(S.T, grad_old)\n\n #printdb(\"S_now_T_grad_prev {}\".format(S_now_T_grad_prev))\n #np.testing.assert_allclose(S_now_T_grad_prev,np.dot(S.T, grad_old),\n # err_msg=\"Maybe the assumption of Theorem 2.2\"\n # \"is not valid: sk-1Tyk-1 = {}\".format(S[:,-1].T.dot(Y[:,-1])))\n\n # 3. 
# TOOPTIMIZE\n #sprevTgradprev = np.dot(S[:, -1].T, grad_old) # sk-1T gk-1\n\n #%% 4.\n #ykm12 = np.dot(Y[:, -1].T, Y[:, -1]) #TOOPTIMIZE\n\n #printdb(\"before\")\n #printdb(\"R: {}\".format(R))\n if k > maxcor:\n R = np.roll(R, (-1, -1), axis=(0, 1)) # mxm Matrix hold by all Processors\n R[-1, :] = 0\n STym1 = STgrad - S_now_T_grad_prev\n R[:, -1] = STym1.flat #O(m x n)\n\n elif k == 1:\n R = np.triu(np.dot(S.T, Y))\n else:\n R = np.vstack([R, np.zeros(k - 1)])\n R = np.hstack([R, np.dot(S.T, Y[:, -1]).reshape(k, 1)])\n\n #np.testing.assert_allclose(R, np.triu(np.dot(S.T, Y)))\n\n if k > maxcor:\n D = np.roll(D, (-1, -1), axis=(0, 1))\n # D[-1,-1] = np.dot(Y[:,-1],Y[:,-1])# yk-1Tyk-1 # TOOPTIMIZE\n D[-1, -1] = R[-1,-1]\n else:\n #D = np.diag(np.einsum(\"ik,ik -> k\", S, Y))\n D=np.diag(R.diagonal())\n\n assert D[-1,-1] >0, \"k = {}: \".format(k) # Assumption of Theorem 2.2\n #np.testing.assert_allclose(np.diag(D), np.diag(R))\n\n # YTY = np.dot(Y.T,Y) #TOPTIMIZED\n if k > maxcor:\n YTY = np.roll(YTY, (-1, -1), axis=(0, 1))\n #printdb(YTgrad)\n #printdb(YTgrad_prev)\n YTY[-1, :-1] = YTY[:-1, -1] = (YTgrad[:-1] - YTgrad_prev[1:]).flat\n YTY[-1, -1] = grad2prev - grad2 + 2 * YTgrad[-1]\n else:\n YTY = np.dot(Y.T, Y)\n #np.testing.assert_allclose(YTY, np.dot(Y.T, Y))\n ##\n #printdb(\"after\")\n #printdb(\"R: {}\".format(R))\n #printdb(\"YTY: {}\".format(YTY))\n #printdb(\"D: {}\".format(D))\n\n #%% 5.\n gamma = D[-1, -1] / YTY[-1,-1] # n.b. D[-1,-1] = sk-1T yk-1 = yk-1T sk-1\n\n #%% 6.\n #Rinv = np.linalg.inv(R)\n Rinv = scipy.linalg.solve_triangular(R,np.eye(min(k,maxcor)))\n\n RiSg = Rinv.dot(STgrad)\n\n p = np.vstack([Rinv.T.dot(D + gamma * YTY).dot(RiSg) - gamma * Rinv.T.dot(YTgrad)\n , - RiSg])\n\n #%% 7.\n Hgrad = gamma * grad + np.hstack([S, gamma * Y]).dot(p)\n\n #%% linesearch\n\n #reslinesearch = scipy.optimize.minimize_scalar(fun=lambda alpha: fun(x - Hgrad * alpha), bounds=(0, 10), method=\"bounded\")\n #assert reslinesearch.success, \"Linesearch not converged\"\n # line_search did cause problems, maybe because of the differentz interpretation of the arrays\n #alpha,fc,gc,new_fval,old_fval,new_slope = scipy.optimize.line_search(fun,lambda x_ : fprime(x_).flatten(),x, - Hgrad.flatten() , c1=1e-4,c2=0.9,maxiter=20)\n\n #printdb(\"assert descent direction\")\n #assert fun(x - Hgrad * 0.001) - fun(x) < 0\n #printdb(fun(x - Hgrad * 0.001) - fun(x))\n \n alpha,phi,phi0,derphi = scipy.optimize.linesearch.scalar_search_wolfe2(lambda alpha: fun(x - Hgrad * alpha), lambda alpha: np.dot(jac(x - Hgrad * alpha).T, -Hgrad),maxiter = maxls,**linesearch_options)\n\n if derphi is None:\n import matplotlib.pyplot as plt\n figdebug,axdebug=plt.subplots()\n alphas = np.linspace(-1,10)\n\n axdebug.plot(alphas,[fun(x - a * Hgrad) for a in alphas] )\n figdebug.show()\n printdb(\"scalar line search did not converge\")\n printdb(\"alpha: {}\".format(alpha))\n plt.show(block=True)\n\n assert derphi is not None, \"scalar line-search did not converge\"\n #assert new_fval is not None, \"Line-search didn't converge\"\n #printdb(\"x: {}\".format(x))\n x_old[:] = x\n #printdb(\"x_old: {}\".format(x_old))\n x = x - Hgrad * alpha\n #printdb(\"x: {}\".format(x))\n #printdb(\"x_old: {}\".format(x_old))\n #printdb(\"x = {}\".format(x))\n #assert phi < phi0, \"f(x) >= f(x_old) ! 
\"\n grad_old[:] = grad\n grad = jac(x)\n #assert fun(x) <= fun(x_old) + 1e-4 * alpha * grad_old.T.dot(-Hgrad), \"First Wolfe Condition not fullfilled\"\n #assert grad.T.dot(-Hgrad) >= 0.9 * grad_old.T.dot(-Hgrad), \"second Wolfe Condition not fullfilled\"\n #printdb(\"dx * -Hgrad:{}\".format((x-x_old).T.dot(-Hgrad)))\n #printdb(alpha)\n #assert (grad - grad_old).T.dot(x - x_old) > 0, \"msg derphi = {}\".format(derphi)\n if store_iterates == 'iterate':\n iterate = scipy.optimize.OptimizeResult(\n {'x': x.copy(),\n 'fun': phi,\n 'jac': grad})\n iterates.append(iterate)\n\n k = k + 1", "def b_gradient_descent(self, LB,UB,eta, tol,iter):\n bgd=[]\n bgd_x=[LB]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n new_x=LB\n bgd_x.append(LB)\n \n for i in range(iter):\n for j in np.arange(LB,UB,0.1):\n prev_x=new_x\n new_x=prev_x-(learn_rate*first_derivative(prev_x))\n #print(\"i = \",j,\"gradient =\",(learn_rate*first_derivative(j)),iteration)\n iteration=iteration+1\n #print(iteration)\n if iteration >=iter:\n break \n if new_x <= tol:\n #print(\"new_x = \",new_x,\"gradient =\",(learn_rate*first_derivative(prev_x)), iteration) \n break\n \n \n \n \n #print(new_x)\n bgd_x.append(new_x)\n \n \n bgd.append(bgd_x)\n bgd.append(new_x)\n bgd.append(iteration)\n\n return bgd", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n 
k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def evaluate(self, g1, g2):\n \n # depth contribution using gaussian kernel\n K_dd = self.d_kernel.evaluate(g1.D, g2.D).flatten()\n K_d = vprod(K_dd) # matrix product of the two vectors K_dd.T and K_dd\n\n # geometrical (coordinates) contribution using gaussian kernel\n K_xx = self.x_kernel.evaluate(g1.X, g2.X).flatten()\n K_x = vprod(K_xx)\n\n # structural contribution using linear kernel on binary values\n direct_adjacency = np.kron(g1.A, g2.A)\n K_s = direct_adjacency\n\n # with two of the three components\n K_xd = np.multiply(K_x, K_d)\n K_sd = np.multiply(K_s, K_d)\n K_sx = 
np.multiply(K_s, K_x)\n # the full kernel with the three components\n K_sxd = np.multiply(K_sx, K_d)\n\n\n # sum all terms of these babies to compute the kernel values\n K_list = [K_sxd.sum(), K_xd.sum(), K_sd.sum(), K_sx.sum(), K_s.sum(), \n K_x.sum(), K_d.sum()]\n \n if self.subkernels:\n # return the full K_sga kernel and other kernels which do not use\n # all three types of features\n return np.array(K_list)\n else:\n # return only the full K_sga kernel \n return K_sxd.sum()", "def optimize_gp_hyperparams(x, y, optimization_steps, learning_rate,\r\n kernel=rational_quadratic_kernel_torch, params=None):\r\n x = np.array(x).reshape(-1, 1)\r\n y = np.array(y).reshape(-1, 1)\r\n N = len(x)\r\n\r\n # tranform our training set in Tensor\r\n x_tensor = torch.from_numpy(x).float()\r\n y_tensor = torch.from_numpy(y).float()\r\n\r\n # we should define our hyperparameters as torch parameters where we keep track of\r\n # the operations to get hte gradients from them\r\n m = np.abs(y[-1][0])\r\n mu = torch.tensor(m).float()\r\n s = np.var(y)\r\n sig = torch.tensor(s).float()\r\n if params:\r\n lambda_param = nn.Parameter(torch.tensor(params['ls']), requires_grad=True)\r\n alpha_param = nn.Parameter(torch.tensor(params['a']), requires_grad=True)\r\n output_variance = nn.Parameter(torch.tensor(params['ov']), requires_grad=True)\r\n noise_variance = nn.Parameter(torch.tensor(params['nv']), requires_grad=True)\r\n mu_param = nn.Parameter(torch.tensor(params['mu']), requires_grad=True)\r\n sig_param = nn.Parameter(torch.tensor(params['sig']), requires_grad=True)\r\n\r\n else:\r\n lambda_param = nn.Parameter(torch.tensor(1.), requires_grad=True)\r\n alpha_param = nn.Parameter(torch.tensor(1.), requires_grad=True)\r\n output_variance = nn.Parameter(torch.tensor(1.), requires_grad=True)\r\n noise_variance = nn.Parameter(torch.tensor(2.5), requires_grad=True)\r\n mu_param = nn.Parameter(mu, requires_grad=True)\r\n sig_param = nn.Parameter(sig, requires_grad=True)\r\n\r\n # we use Adam as optimizer\r\n optim = torch.optim.Adam([lambda_param, alpha_param, output_variance,\r\n noise_variance, mu_param, sig_param], lr=learning_rate)\r\n\r\n # optimization loop using the log-likelihood that involves the cholesky decomposition\r\n nlls = []\r\n lambdas = []\r\n output_variances = []\r\n noise_variances = []\r\n iterations = optimization_steps\r\n for i in range(iterations):\r\n if lambda_param.item() == lambda_param.item():\r\n return_dict = {'ls': lambda_param.item(),\r\n 'a': alpha_param.item(),\r\n 'ov': output_variance.item(),\r\n 'nv': noise_variance.item(),\r\n 'mu': mu_param.item(),\r\n 'sig': sig_param.item()}\r\n assert noise_variance >= 0, f\"ouch! 
{i, noise_variance}\"\r\n optim.zero_grad()\r\n K = kernel(x_tensor, x_tensor, lambda_param, alpha_param,\r\n output_variance) + noise_variance * torch.eye(N)\r\n try:\r\n cholesky = torch.cholesky(K)\r\n _alpha_temp, _ = torch.solve(y_tensor, cholesky)\r\n _alpha, _ = torch.solve(_alpha_temp, cholesky.t())\r\n except:\r\n return return_dict\r\n nll = N / 2 * torch.log(torch.tensor(2 * np.pi)) + 0.5 * torch.matmul(y_tensor.transpose(0, 1), _alpha) + \\\r\n torch.sum(torch.log(torch.diag(cholesky)))\r\n\r\n # we have to add the log-likelihood of the prior\r\n norm = distributions.Normal(loc=m, scale=s)\r\n prior_negloglike = torch.log(lambda_param) - torch.log(torch.exp(norm.log_prob(lambda_param)))\r\n\r\n nll += 0.9 * prior_negloglike\r\n nll.backward()\r\n\r\n nlls.append(nll.item())\r\n lambdas.append(lambda_param.item())\r\n output_variances.append(output_variance.item())\r\n noise_variances.append(noise_variance.item())\r\n optim.step()\r\n\r\n # projected in the constraints (lengthscale and output variance should be positive)\r\n for p in [lambda_param, output_variance]:\r\n p.data.clamp_(min=0.0000001)\r\n noise_variance.data.clamp_(min=0.001, max=0.05)\r\n alpha_param.data.clamp_(min=0.001, max=0.1)\r\n # mu_param.data.clamp_(min=0.01, max=40)\r\n # sig_param.data.clamp_(min=0.01, max=40)\r\n\r\n return_dict = {'ls': lambda_param.item(),\r\n 'a': alpha_param.item(),\r\n 'ov': output_variance.item(),\r\n 'nv': noise_variance.item(),\r\n 'mu': mu_param.item(),\r\n 'sig': sig_param.item()}\r\n\r\n return return_dict", "def bfgs_method(x0, eps=1e-6, H0=np.eye(18),c1=1e-4):\n k = 0 # initialize num of outer iterations.\n inner_k = 0 # initialize inner k iteration.\n old_xk = None\n alpha_original = 1\n alpha = np.copy(alpha_original)\n xk = x0 # intitialize x.\n Hk = H0 # initialize H, positive definite matrix.\n I = np.eye(len(x0)) # idenitity matrix of 2 by 2.\n\n alpha_vec = []\n f_vec = []\n grad_vec = []\n inner_k = []\n conv_c = []\n\n while np.linalg.norm(rosen_der(xk)) > eps:\n pk = -Hk @ rosen_der(xk)\n\n xk_next = xk + alpha * pk\n ink = 0\n print(xk)\n while rosen(xk_next) > rosen(xk) + c1 * alpha * (pk.T @ rosen_der(xk)):\n \"\"\" find a step size that will satisfy Armijo-Goldstein inequality. Modify alpha. 
\"\"\"\n alpha = 0.1* alpha\n xk_next = xk + alpha * pk\n ink += 1\n\n inner_k.append(abs(int(ink)))\n\n xk_next = xk + alpha * pk\n\n sk = xk_next - xk\n\n yk = rosen_der(xk_next) - rosen_der(xk)\n\n rho = 1 / (yk.T @ sk)\n\n Hk = np.copy((I - rho * sk @ yk.T) @ Hk @ (I - rho * yk @ sk.T) + rho * sk @ sk.T)\n\n old_xk = np.copy(xk)\n xk = np.copy(xk_next)\n\n alpha_vec.append(alpha)\n f_vec.append(rosen(xk))\n grad_vec.append(np.linalg.norm(rosen_der(xk)))\n alpha = np.copy(alpha_original)\n print(f_vec[-1])\n\n k += 1\n\n return xk, k, inner_k, alpha_vec, f_vec, grad_vec", "def fit(x_train,x_test,y_train,y_test):\n #data=np.loadtxt(\"/home/manjunath/iris/iris.csv\", comments=None, delimiter=',', usecols=(0,1,2,3,4), converters={4: convert_y }) \n\n h=(10,10,10)\n step_size=0.001\n tolerence=0.001\n iteration_max=1000\n iteration=0\n #Regularisation param, added to gradients \n reg=0.01\n \n K=np.unique(y_train).shape[0]\n \n #x=np.loadtxt(\"/home/manjunath/iris/iris.csv\", comments=None, delimiter=',', converters=None, usecols=(0,1,2,3))\n \n \n \"\"\"\n \n train_mean=np.mean(x_train,axis=0)\n x_train=x_train-train_mean\n #std_x = np.sqrt(np.sum(np.square(x_train - train_mean),axis=0)/x_train.shape[1])\n std_x=np.std(x_train,axis=0)\n x_train=x_train/std_x\n \n x_test=x_test - train_mean\n x_test=x_test/std_x\n\n \"\"\"\n\n\n\n y_train=y_to_classification_form(y_train,K)\n y_test=y_to_classification_form(y_test,K)\n\n n_samples,n_features=x_train.shape\n gamma2=np.random.randn(h[0]).reshape(1,h[0])\n beta2=np.random.randn(h[0]).reshape(1,h[0])\n gamma3=np.random.randn(h[1]).reshape(1,h[1])\n beta3=np.random.randn(h[1]).reshape(1,h[1])\n eps=0.001\n \n w1=(np.random.randn(n_features*h[0]).reshape(n_features,h[0]))/np.sqrt(2/(n_features+h[0]))\n w2=(np.random.randn(h[0]*h[1]).reshape(h[0],h[1]))/np.sqrt(2/(h[0]+h[1]))\n w3=(np.random.randn(h[1]*h[2]).reshape(h[1],h[2]))/np.sqrt(2/(h[1]+h[2]))\n \n dw1_priv=np.zeros(w1.shape)\n dw2_priv=np.zeros(w2.shape)\n dw3_priv=np.zeros(w3.shape)\n \n #w3=(np.random.randn(h[1]*K).reshape(h[1],K)*0.5)/np.sqrt(2/h[1]+K)\n #Basically no significance, added bias for completion\n b1 = np.zeros((1,h[0]))\n b2 = np.zeros((1,h[1]))\n b3 = np.zeros((1,K))\n \n while iteration<iteration_max :\n \n #Calculate scores \n scores_layer1=np.dot(x_train,w1)+b1 # 125x4,4x10 = 125x10\n #print(\"iteration\",iteration, \"first layer\",np.any(np.isnan(scores_layer1)))\n #Do not use sigmoid, you will be stuck in long mess of nans and inf and overflows and div by zeros\n #x2=1/1+np.exp(-scores_layer1) # 150 x 4\n \n #Use reLU\n \n #x2=np.maximum(0,scores_layer1)\n bn_x2,bn_cache2=bn.batch_norm_forword(scores_layer1,gamma2,beta2) #125x10\n #print(\"iteration\",iteration, \"first layer BN\",np.any(np.isnan(bn_x2)))\n #x2=relu.relu_forword(bn_x2.T)\n x2=relu.relu_forword(bn_x2) #125x10\n #print(\"iteration\",iteration, \"first layer relu\",np.any(np.isnan(x2)))\n \n score_layer2=np.dot(x2,w2)+b2 #125x10,10x10=125x10\n #print(\"iteration\",iteration, \"second layer\",np.any(np.isnan(score_layer2)))\n bn_x3,bn_cache3=bn.batch_norm_forword(score_layer2,gamma3,beta3) #125x10\n x3=relu.relu_forword(bn_x3) #125x10 \n \n final_scores=np.dot(x3,w3)+b3 # 125x10,10x3=125x3\n \n #Again, use softmax or sigmoid loss for classification, MSE or distance is for regression only \n \n probs=fn.softmax(final_scores) #125x3\n \n \n \n dscores=fn.cross_enropy_grad_singleclass(probs,y_train) # 125x3\n #There is possibility of only 1 class for data, so use below, else the implementation will be bit 
complex \n #print(x3.shape)\n dw3=np.dot(x3.T,dscores) # 10x125,125x3=10x3\n dx3=np.dot(w3,dscores.T) # 10x3,3x125=10x125\n \n #dhid2=dx3.T\n #dhid2[x3<=0]=0\n \n dhid2=relu.relu_backword(dx3.T,x3) #125x10\n #print(\"dhid2\",dhid2.shape)\n bn_dhid2,dgamma3,dbeta3=bn.batch_norm_backword(dhid2,bn_cache3) #125x10\n #dprod = (x2 * (1- x2)) * dx2.T # this is wrong, find out why, we mostly need to multiply with upstream gradient \n \n dw2=np.dot(x2.T,bn_dhid2) # 10x125,125x10=10x10\n dx2=np.dot(w2,dhid2.T) #10x10,10x125=10x125\n \n #dhid1=dx2.T\n #dhid1[x2<=0]=0\n \n dhid1=relu.relu_backword(dx2.T,x2) #125x10\n \n bn_dx2,dgamma2,dbeta2=bn.batch_norm_backword(dhid1,bn_cache2) #125x10\n #print(dprod.shape)\n \n dw1 = np.dot( x_train.T,bn_dx2) # 125x4,12510=4x10\n\n db1=np.sum(b1,axis=0,keepdims=True) \n db2=np.sum(b2,axis=0,keepdims=True) \n db3=np.sum(b3,axis=0,keepdims=True)\n \n #Regularisation of gradients\n \n #Optimisation\n \n #dw1 = (dw1+dw1_priv)/2\n #dw2 = (dw2+dw2_priv)/2\n #dw3 = (dw3+dw3_priv)/2\n \n dw3 += reg*w3\n dw2 += reg*w2\n dw1 += reg*w1\n \n w1 = w1 - (step_size * dw1)\n w2 = w2 - (step_size * dw2)\n w3 = w3 - (step_size * dw3)\n \n #print(dw1)\n #print(dw2)\n #print(dw3)\n \n #dw1_priv=dw1\n #dw2_priv=dw2\n #dw3_priv=dw3\n \n \"\"\"\n redundant parameters after batch normalization \n \"\"\"\n \n b1 = b1 - (step_size * db1)\n b2 = b2 - (step_size * db2)\n b3 = b3 - (step_size * db3)\n \n\n \n gamma2= gamma2 - (step_size * dgamma2)\n beta2 = beta2 - (step_size * dbeta2)\n gamma3= gamma3 - (step_size * dgamma3)\n beta3 = beta3 - (step_size * dbeta3)\n\n \n if iteration%10 == 0 :\n #print(\"****iteration:\",iteration)\n #x_test /= 10 \n \n s1=np.dot(x_test,w1)\n #px2=1/1+np.exp(-s1)\n bn_x2t,bn_cache2t=bn.batch_norm_forword(s1,gamma2,beta2)\n px2=relu.relu_forword(bn_x2t)\n \n s2=np.dot(px2,w2) \n bn_x3t,bn_cache3t=bn.batch_norm_forword(s2,gamma3,beta3)\n px3=relu.relu_forword(bn_x3t)\n \n out=np.dot(px3,w3)\n \n counter=0\n for y_p,y_a in zip(np.argmax(out,axis=1),y_test):\n if np.argmax(y_a)==y_p:\n counter +=1\n print(\"accuracy: \", (counter/10000) *100,\"%\")\n loss=fn.cross_entropy_loss_singleclass(probs,y_train) # scalar\n print('Loss',loss/n_samples)\n \n dw1_p=np.zeros_like(dw1)\n dw2_p=np.zeros_like(dw2)\n dw3_p=np.zeros_like(dw3)\n \n print(\"dw1\",dw1==dw1_p)\n print(\"dw1\",dw2==dw2_p)\n print(\"dw1\",dw3==dw3_p)\n \n dw1_p=dw1\n dw2_p=dw2\n dw3_p=dw3\n \n #print(\"gamma2\",gamma2)\n #print(\"beta2\",beta2)\n \n iteration=iteration+1\n \n #print('FInal weights are: ', w1,w2)", "def fdist(param1, param2):\n return(prng.gamma(param1, param2))", "def kl2(mu1, mu2, sigma1, sigma2):\n return np.log(sigma2/sigma1) + 0.5 * (sigma1**2/sigma2**2 + (mu2-mu1)**2/sigma2**2 - 1)", "def kl2(mu1, mu2, sigma1, sigma2):\n return np.log(sigma2/sigma1) + 0.5 * (sigma1**2/sigma2**2 + (mu2-mu1)**2/sigma2**2 - 1)", "def div_loss(gamma, model):\n\n def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n \"\"\"Punish non-zero divergence. Each input of size N (batch size) is expanded into\n 4 sets of surrounding points, p1, p2, p3, and p4, where the elements pj_i (j = 1,2,3,4 and\n i = 1,2,3...N) are defined according to:\n\n y\n | p2_i\n |\n | p1_i P_i p3_i\n |\n ------------- x p4_i\n\n\n The sets p1, p2, p3, and p4 are used in estimating the divergence for each point P_i. 
The partial\n derivatives are calculated with a three-point centered difference.\n\n The extra points are \"smuggled\" into the loss function in the data argument:\n\n data.head() =\n < y_true > <------ p1 -------> <------- p2 ------> <------- p3 ------> <--------p4 ------>\n | u | v | x1 | y1 | h | x2 | y2 | h | x3 | y3 | h | x4 | y4 | h |\n \"\"\"\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))\n return loss", "def _grid_search_wl_kernel(\n k: WeisfilerLehman,\n subtree_candidates,\n train_x: list,\n train_y: torch.Tensor,\n lik: float,\n subtree_prior=None, # pylint: disable=unused-argument\n lengthscales=None,\n lengthscales_prior=None, # pylint: disable=unused-argument\n):\n # lik = 1e-6\n assert len(train_x) == len(train_y)\n best_nlml = torch.tensor(np.inf)\n best_subtree_depth = None\n best_lengthscale = None\n best_K = None\n if lengthscales is not None and k.se is not None:\n candidates = [(h_, l_) for h_ in subtree_candidates for l_ in lengthscales]\n else:\n candidates = [(h_, None) for h_ in subtree_candidates]\n\n for i in candidates:\n if k.se is not None:\n k.change_se_params({\"lengthscale\": i[1]})\n k.change_kernel_params({\"h\": i[0]})\n K = k.fit_transform(train_x, rebuild_model=True, save_gram_matrix=True)\n # self.logger.debug(K)\n K_i, logDetK = compute_pd_inverse(K, lik)\n # self.logger.debug(train_y)\n nlml = -compute_log_marginal_likelihood(K_i, logDetK, train_y)\n # self.logger.debug(f\"{i} {nlml}\")\n if nlml < best_nlml:\n best_nlml = nlml\n best_subtree_depth, best_lengthscale = i\n best_K = torch.clone(K)\n # self.logger.debug(f\"h: {best_subtree_depth} theta: {best_lengthscale}\")\n # self.logger.debug(best_subtree_depth)\n k.change_kernel_params({\"h\": best_subtree_depth})\n if k.se is not None:\n k.change_se_params({\"lengthscale\": best_lengthscale})\n k._gram = best_K # pylint: disable=protected-access", "def gradient_other(self):\n # This is just the difference in the feature values\n return self.fvs", "def _grad(V):\n dv = diag(V)\n weights, A, _, AinvB = _weights(dv)\n Ey = (weights.T.dot(Y_control) - Y_treated).getA()\n dGamma0_dV_term2 = zeros(K)\n #dPI_dV = zeros((N0, N1)) # stupid notation: PI = W.T\n #Ai = A.I\n for k in range(K):\n if verbose: # for large sample sizes, linalg.solve is a huge bottle neck,\n print(\"Calculating gradient, linalg.solve() call %s of %s\" % (k ,K,))\n #dPI_dV.fill(0) # faster than re-allocating the memory each loop.\n dA = dA_dV_ki[k]\n dB = dB_dV_ki[k]\n dPI_dV = linalg.solve(A,(dB - dA.dot(AinvB))) \n #dPI_dV = Ai.dot(dB - dA.dot(AinvB))\n 
dGamma0_dV_term2[k] = np.einsum(\"ij,kj,ki->\",Ey, Y_control, dPI_dV) # (Ey * Y_control.T.dot(dPI_dV).T.getA()).sum()\n return LAMBDA + 2 * dGamma0_dV_term2", "def gradK(self, X, Xstar, param):\n if param == 'gamma':\n eps = 10e-6\n r = l2norm_(X, Xstar) + eps\n first = -np.exp(- (r / self.l) ** self.gamma)\n sec = (r / self.l) ** self.gamma * np.log(r / self.l)\n gamma_grad = first * sec\n return (gamma_grad)\n elif param == 'l':\n r = l2norm_(X, Xstar)\n num = self.gamma * np.exp(-(r / self.l) ** self.gamma) * (r / self.l) ** self.gamma\n l_grad = num / self.l\n return (l_grad)\n elif param == 'sigmaf':\n r = l2norm_(X, Xstar)\n sigmaf_grad = (np.exp(-(r / self.l) ** self.gamma))\n return (sigmaf_grad)\n elif param == 'sigman':\n sigman_grad = kronDelta(X, Xstar)\n return (sigman_grad)\n else:\n raise ValueError('Param not found')", "def KL_bound(k_var, k_ls, sigma_n, N, p_sd, p_success, bound_y, M):\n a = 1. / (4 * np.square(p_sd))\n b = 1. / (2 * np.square(k_ls))\n c = np.sqrt(np.square(a) + 2 * a * b)\n A = a + b + c\n B = b / A\n delta = 1 - p_success\n eigenvalues_sum = k_var * np.sqrt(2 * a / A) * np.power(B, M) / (1 - B)\n two_delta_sn_sq = 2 * delta * np.square(sigma_n)\n first_term = (M+1) * N * eigenvalues_sum / two_delta_sn_sq\n second_term = 1 + bound_y / np.square(sigma_n)\n return first_term * second_term", "def KL(P,Q):\n epsilon = 0.00001\n \n #You may want to instead make copies to avoid changing the np arrays.\n P = P+epsilon\n Q = Q+epsilon\n \n divergence = np.sum(P*np.log(P/Q))\n return divergence", "def gradK(self, X, Xstar, param):\n r = l2norm_(X, Xstar)\n if param == 'l':\n num_one = 5 * r ** 2 * np.exp(-np.sqrt(5) * r / self.l)\n num_two = np.sqrt(5) * r / self.l + 1\n res = num_one * num_two / (3 * self.l ** 3)\n return res\n elif param == 'sigmaf':\n one = (1 + np.sqrt(5 * (r / self.l) ** 2) + 5 * (r / self.l) ** 2 / 3)\n two = np.exp(-np.sqrt(5 * r ** 2))\n return one * two\n elif param == 'sigman':\n return kronDelta(X, Xstar)", "def fit(self, samples):\n\n # variables initilization\n n_samples = samples.shape[0]\n n_features = samples.shape[1]\n cov_x = (1/n_samples)*np.matmul(samples, np.transpose(samples)) # samples covariance matrix\n eigenvalues_x, v = np.linalg.eig(cov_x)\n self.T = 2*abs(max(eigenvalues_x)) + 100\n self.K = 1\n self.marginal_probs = np.ones(shape=self.K) # p(y_i)\n self.cluster_probs = np.ones(shape=(n_samples, self.K)) # p(y_i|x)\n self.cluster_centers = np.zeros(shape=(self.K, n_features))\n mean_average = sum(samples[i]*1/n_samples for i in range(n_samples))\n self.cluster_centers[0] = mean_average\n distance_list = [0.0]\n self.bifurcation_tree.create_node(identifier=0, data={'cluster_id': 0, 'distance': distance_list, 'centroid_position': mean_average})\n idx = 0\n\n while self.T > self.eps_T:\n if self.T < self.T_min:\n self.T = self.eps_T\n\n #print('centers:', self.cluster_centers)\n # print(self.cluster_probs)\n # print(self.K)\n #print('T:', self.T)\n\n old_cluster_centers = np.ones(shape=(self.K, n_features)) # may be replaced by random initialization\n while self.close(self.cluster_centers, old_cluster_centers) > self.convergence_threshold:\n old_cluster_centers = self.cluster_centers.copy()\n\n dist_mat = self.get_distance(samples, self.cluster_centers)\n self.cluster_probs = self._calculate_cluster_probs(dist_mat, self.T)\n\n for i in range(self.K):\n # compute marginal probability\n self.marginal_probs[i] = sum((1/n_samples) * self.cluster_probs[j, i] for j in range(n_samples))\n # compute clusters centers\n 
self.cluster_centers[i] = sum(samples[j]*(1/n_samples)*self.cluster_probs[j, i] for j in range(n_samples))/self.marginal_probs[i]\n\n # print('old:', old_cluster_centers)\n # print('new:', self.cluster_centers)\n # print('dist:', self.close(self.cluster_centers, old_cluster_centers))\n\n # reduce temperature\n self.T = self.cooling_rate*self.T\n idx += 1\n\n # check critical temperatures and split clusters\n if self.K < self.K_max:\n for j in range(self.K):\n # cluster covariance matrix\n cov_cluster = sum(self.cluster_probs[i, j]*(1/n_samples)/self.marginal_probs[j]*np.outer((samples[i] - self.cluster_centers[j]), (samples[i] - self.cluster_centers[j])) for i in range(n_samples)) # eq. 18 Rose et. al\n eigenvalues_cluster, v = np.linalg.eig(cov_cluster)\n critical_temperature = 2*abs(max(eigenvalues_cluster)) # Theorem 1 Rose et al\n # print('critical_temperature:', critical_temperature)\n if self.T < critical_temperature:\n # split cluster\n splitting_cluster_center = self.cluster_centers[j, :].copy()\n new_cluster_center = self.cluster_centers[j, :].copy() + np.random.normal(0, self.noise, n_features)\n self.cluster_centers = np.vstack((self.cluster_centers, new_cluster_center))\n self.marginal_probs[j] = self.marginal_probs[j]/2\n self.marginal_probs = np.append(self.marginal_probs, self.marginal_probs[j].copy())\n self.K += 1\n dist_mat = self.get_distance(samples, self.cluster_centers)\n self.cluster_probs = self._calculate_cluster_probs(dist_mat, self.T)\n\n # add new nodes to bifurcation tree\n splitting_cluster_identifier = j\n parent_node = self.bifurcation_tree.get_node(nid=splitting_cluster_identifier)\n parent_distances_list = parent_node.data['distance'].copy()\n self.bifurcation_tree.update_node(splitting_cluster_identifier, identifier='parent')\n self.bifurcation_tree.create_node(identifier=splitting_cluster_identifier,\n data={'cluster_id': splitting_cluster_identifier, 'distance': parent_distances_list.copy(), 'centroid_position': splitting_cluster_center, 'direction': 'right'},\n parent='parent')\n distance = self.distance(splitting_cluster_center, new_cluster_center)\n last_parent_distance = parent_distances_list[-1]\n distance_list_new = [last_parent_distance-distance]\n self.bifurcation_tree.create_node(identifier=self.K-1,\n data={'cluster_id': self.K-1, 'distance': distance_list_new, 'centroid_position': new_cluster_center, 'direction': 'left'},\n parent='parent')\n self.bifurcation_tree.update_node('parent', identifier='old'+str(splitting_cluster_identifier)+str(self.bifurcation_tree.size())) # encodes unique id\n self.bifurcation_tree_cut_idx = idx\n break\n\n # log iteration\n if self.T >= self.T_min:\n self.n_eff_clusters.append(self.K)\n self.temperatures.append(self.T)\n distances = self.get_distance(samples, self.cluster_centers)\n distances = np.square(distances)\n distortion = sum((1/n_samples)*self.cluster_probs[i, j]*distances[i, j] for i in range(n_samples) for j in range(self.K))\n self.distortions.append(distortion)\n\n # update position and distance of bifurcation tree leaves\n for j in range(self.K):\n cluster_node = self.bifurcation_tree.get_node(nid=j)\n parent = self.bifurcation_tree.parent(nid=j)\n if parent:\n cluster_centroid_vector = self.cluster_centers[j] # assigns updated cluster centroid position\n parent_centroid_vector = parent.data['centroid_position'].copy()\n parent_child_distance = self.distance(parent_centroid_vector, cluster_centroid_vector) # distance between cluster and parent\n updated_distance_list = 
cluster_node.data['distance']\n parent_ending_distance = (parent.data['distance'])[-1]\n direction = cluster_node.data['direction']\n if direction == 'left': # check left or right\n new_distance = parent_ending_distance - parent_child_distance\n updated_distance_list.append(new_distance)\n self.bifurcation_tree.update_node(j, data={'cluster_id': j, 'distance': updated_distance_list,\n 'centroid_position': cluster_centroid_vector, 'direction': 'left'})\n else:\n new_distance = parent_ending_distance + parent_child_distance\n updated_distance_list.append(new_distance)\n self.bifurcation_tree.update_node(j, data={'cluster_id': j, 'distance': updated_distance_list,\n 'centroid_position': cluster_centroid_vector,\n 'direction': 'right'})\n else:\n # updates root until the first split\n cluster_centroid_vector = self.cluster_centers[j]\n new_distance = self.distance(mean_average, cluster_centroid_vector)\n updated_distance_list = cluster_node.data['distance']\n updated_distance_list.append(new_distance)\n self.bifurcation_tree.update_node(j, data={'cluster_id': j, 'distance': updated_distance_list,\n 'centroid_position': cluster_centroid_vector})\n\n\n # self.bifurcation_tree.show(nid='old01')\n # print(self.n_eff_clusters)\n # print(self.temperatures)\n # print(self.distortions)\n # print('centers:', self.cluster_centers)\n\n # prints for debug purposes\n # leaves = self.bifurcation_tree.leaves()\n # for node in leaves:\n # print(node.data['cluster_id'])\n # print(node.data['distance'])", "def _ci_grads(preds, dtrain):\n # predictions: np.array with shape of (n, )\n n = preds.shape[0]\n y_hat = preds\n\n # labels: np.array with shape of (n, )\n labels = dtrain.get_label().astype('int')\n E = (labels > 0).astype('int')\n T = np.abs(labels)\n\n # L2 Gradient Computation (Concordance Index Approximation)\n # gradients computation of numerator and denominator in L2\n # initialization\n num, den = .0, .0\n grad_den = np.zeros_like(y_hat)\n hess_den = np.zeros_like(y_hat) # 0\n grad_num = np.zeros_like(y_hat)\n hess_num = np.zeros_like(y_hat)\n\n # firstly, compute gradients of numerator(\\alpha) and denominator(\\beta) in L2\n for k in np.arange(n):\n ## gradients of denominator (\\beta)\n # For set s1 (i.e. \\omega 1 in the paper)\n # s1 = (k, i): E_k = 1 and T_k < T_i\n s1 = E[k] * np.sum(T > T[k])\n # For set s2 (i.e. \\omega 2 in the paper)\n # s2 = (i, k): E_i = 1 and T_i < T_k\n s2 = np.sum((E > 0) * (T < T[k]))\n # For grad_den (i.e. the first-order gradient of denominator)\n grad_den[k] = s2 - s1\n # hess_den[k] = 0\n\n ## gradients of numerator (\\alpha)\n\n # set S1\n # i.e. the first-order and second-order gradients related to set s1\n # s1 = (k, i): E_k = 1 and T_k < T_i\n g_s1, h_s1 = .0, .0\n if E[k] == 1:\n w = y_hat[k] - y_hat[T[k] < T]\n # For den and num\n den += np.sum(-w)\n num += np.sum((w < _GAMMA) * (-w) * (_GAMMA - w)**2)\n\n g_s1 = np.sum((w < _GAMMA) * (_GAMMA - w) * (3*w - _GAMMA))\n\n h_s1 = np.sum((w < _GAMMA) * (4*_GAMMA - 6*w))\n \n # set S2\n # i.e. 
the first-order and second-order gradients related to set s2\n w = y_hat[(E > 0) * (T < T[k])] - y_hat[k]\n g_s2 = np.sum((w < _GAMMA) * (_GAMMA - w) * (_GAMMA - 3*w))\n h_s2 = np.sum((w < _GAMMA) * (4*_GAMMA - 6*w))\n \n grad_num[k] = g_s2 + g_s1\n hess_num[k] = h_s2 + h_s1\n\n if den == 0:\n grad_f = np.zeros_like(y_hat)\n hess_f = np.zeros_like(y_hat)\n else:\n grad_f = grad_num / den - num * grad_den / (den ** 2)\n hess_f = (den * hess_num - num * hess_den) / (den ** 2) - 2 * grad_den / den * grad_f\n \n return grad_f, hess_f", "def global_loss(bce_loss, mu, log_var):\n kl_divergence = 0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return bce_loss - kl_divergence", "def KL_bound2(k_var, k_ls, sigma_n, N, p_sd, p_success, M):\n a = 1. / (4 * np.square(p_sd))\n b = 1. / (2 * np.square(k_ls))\n c = np.sqrt(np.square(a) + 2 * a * b)\n A = a + b + c\n B = b / A\n delta = 1 - p_success\n delta_sn_sq = delta * np.square(sigma_n)\n eigenvalues_sum = k_var * np.sqrt(2 * a / A) * np.power(B, M) / (1 - B)\n return (M+1) * N * eigenvalues_sum / delta_sn_sq", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. 
In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def solve (grad_f, hess_f, x_init=None, TOL=1e-6, rel_conv=True,\r\n gamma=1,MAX_ITER=100):\r\n\r\n d = 2*TOL\r\n k = 0\r\n\r\n xks = []\r\n\r\n if x_init is None:\r\n x_init = np.zeros(grad_f.shape[0])\r\n x = x_init\r\n\r\n xks.append(x)\r\n\r\n while (d > TOL and k < MAX_ITER):\r\n\r\n #Write your code here\r\n\r\n return xks", "def test_gradient_convergence(self):\n pass", "def gradK(self, X, Xstar, param='l'):\n if param == 'l':\n r = l2norm_(X, Xstar)\n num = r ** 2 * self.sigmaf * np.exp(-r ** 2 / (2 * self.l ** 2))\n den = self.l ** 3\n l_grad = num / den\n return (l_grad)\n elif param == 'sigmaf':\n r = l2norm_(X, Xstar)\n sigmaf_grad = (np.exp(-.5 * r ** 2 / self.l ** 2))\n return (sigmaf_grad)\n\n elif param == 'sigman':\n sigman_grad = kronDelta(X, Xstar)\n return (sigman_grad)\n\n else:\n raise ValueError('Param not found')", "def runge(s1: float, s2: float, L: float, m: float):\n return (s2 - s1) / (L**m - 1)", "def _kernel(self, x1, x2, beta=1):\n d = (x1 - x2)**2\n return np.exp(-beta * d)", "def hyperOpt_lossfun(optVals, keywords):\n\n # Recover N & K\n N = keywords[\"dat\"][\"y\"].shape[0]\n K = keywords[\"LL_terms\"][\"K\"]\n method = keywords[\"method\"]\n dat = keywords[\"dat\"]\n weights = keywords[\"weights\"]\n\n # Reconstruct the prior covariance\n hyper = keywords[\"hyper\"].copy()\n\n count = 0\n for val in keywords[\"optList\"]:\n if np.isscalar(hyper[val]):\n hyper.update({val: 2**optVals[count]})\n count += 1\n else:\n hyper.update({val: 2**optVals[count:count + K]})\n count += K\n\n # Determine type of analysis (standard, constant, or day weights)\n if method is None:\n w_N = N\n # the first trial index of each new day\n days = np.cumsum(dat[\"dayLength\"], dtype=int)[:-1]\n missing_trials = dat[\"missing_trials\"]\n elif method == \"_constant\":\n w_N = 1\n days = np.array([], dtype=int)\n missing_trials = None\n elif method == \"_days\":\n w_N = len(dat[\"dayLength\"])\n days = np.arange(1, w_N, dtype=int)\n missing_trials = None\n else:\n raise Exception(\"method \" + method + \" not supported\")\n\n invSigma = make_invSigma(hyper, days, missing_trials, w_N, K)\n ddlogprior = -invSigma\n\n # Retrieve terms for decoupled Laplace appx.\n H = keywords[\"LL_terms\"][\"H\"]\n LL_v = keywords[\"LL_v\"]\n\n # Decoupled Laplace appx to new epsilon given new sigma\n DL_1 = DTv(LL_v, K)\n DL_2 = DT_X_D(ddlogprior, K)\n DL_3 = spsolve(DL_2 + H, DL_1)\n E_flat = Dv(DL_3, K)\n\n # Calculate likelihood and prior terms with new epsilon\n pT, lT, _ = getPosteriorTerms(\n E_flat, hyper=hyper, method=method, dat=dat, weights=weights)\n\n # Calculate posterior term, then approximate evidence for new sigma\n center = DL_2 + lT[\"ddlogli\"][\"H\"]\n logterm_post = (1 / 2) * sparse_logdet(center)\n\n evd = pT[\"logprior\"] + lT[\"logli\"] - logterm_post\n\n return -evd", "def 
analytic_dLdp(q,ps,C1s,C0s,ks,bs,sigma=1):\n n_p=len(ps)\n r=np.linalg.norm(ps-q,axis=1).reshape(-1,1)\n r_hat=(ps-q)/r\n t_hat=np.zeros(r_hat.shape)\n t_hat[:,0]=-r_hat[:,1]\n t_hat[:,1]=r_hat[:,0]\n\n dLdeta=np.zeros(n_p).reshape(-1,1)\n dLdr=np.zeros(n_p).reshape(-1,1)\n\n\n for i in range(n_p):\n Keta=2*(ks[i]*bs[i])**2/(sigma**2) * (r[i]-C1s[i])**(2*bs[i]-2)\n Kr=2*(ks[i]*bs[i])**2/(sigma**2) * (bs[i]-1) * (r[i]-C1s[i])**(2*bs[i]-3)\n sum_eta=sum_kr=0\n for j in range(n_p):\n \n rkrj=np.max([np.min([r_hat[i,:].dot(r_hat[j,:]),1]),-1])\n \n direction=np.sign(np.linalg.det(r_hat[[j,i],:]))\n\n sum_eta += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * rkrj * np.sqrt(1-rkrj**2) * direction\n sum_kr += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * (1-rkrj**2)\n \n dLdeta[i]=Keta*sum_eta\n dLdr[i]=Kr*sum_kr\n \n dLdp = dLdr * r_hat + (dLdeta/r) * t_hat\n \n \n return dLdp", "def Bates_Granger_2(df_train, df_test, nu=None):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n\n # the default length of the relevant window is equal to sample length\n if nu is None:\n nu = T\n\n if nu > T:\n raise ValueError('Parameter nu must be <= length of training sample')\n\n # check whether there is enough observations, so sigma is invertible\n if nu < K:\n raise ValueError('Parameter nu must be >= no. of individual forecasts')\n\n # forecast errors\n errors = df_train.iloc[:, 1:].subtract(df_train.iloc[:, 0], axis=0)\n\n # initialize the covariance matrix sigma\n sigma = np.full((K, K), fill_value=0, dtype=float)\n\n # fill the covariance matrix sigma\n for i in range(K):\n\n for j in range(K):\n\n sigma[i, j] = np.dot(errors.iloc[errors.shape[0]-nu:, i],\n errors.iloc[errors.shape[0]-nu:, j]) / nu\n\n # combining weights\n nominator = np.linalg.solve(sigma, np.full(K, fill_value=1))\n denominator = np.dot(np.full(K, fill_value=1), nominator)\n comb_w = nominator / denominator\n\n # censoring the combining weights\n for i in range(K):\n if comb_w[i] < 0:\n comb_w[i] = 0\n if comb_w[i] > 1:\n comb_w[i] = 1\n\n # rescale the weights so that their sum equals 1\n comb_w = comb_w/comb_w.sum()\n\n # predictions\n df_pred = pd.DataFrame({\"Bates-Granger (2)\": df_test.dot(comb_w)})\n\n return df_pred", "def kl(mu1, mu2):\n return (mu2-mu1)**2/2", "def kl(mu1, mu2):\n return (mu2-mu1)**2/2", "def expert_likelihood(self, X, y): #give to it a proper name!!!\n\t\tgaussians_mean = self.experts_predictions(X) #(N,K) X*W + b\n\t\ty = np.repeat( np.reshape(y, (len(y),1)), self.K, axis = 1) #(N,K)\n\n\t\t#print('sigma: ', self.sigma)\n\t\tres = scipy.stats.norm.pdf( np.divide((y - gaussians_mean), self.sigma) ) #(N,K)\n\t\treturn np.divide(res, self.sigma) #normalizing result" ]
[ "0.64265364", "0.6414014", "0.64129555", "0.6334975", "0.63160306", "0.6193587", "0.61617297", "0.61293685", "0.61277765", "0.6109303", "0.6043571", "0.603763", "0.60138285", "0.59585667", "0.59533715", "0.5900521", "0.5898689", "0.58980924", "0.5893038", "0.58471286", "0.58447254", "0.5840847", "0.5839249", "0.58268327", "0.58098537", "0.5806762", "0.58031523", "0.5799583", "0.57845694", "0.57832813", "0.5776751", "0.57453007", "0.573904", "0.5726252", "0.57113", "0.57040256", "0.56879413", "0.5685738", "0.567255", "0.56592596", "0.5654616", "0.5643348", "0.56428623", "0.56423867", "0.56396115", "0.56352407", "0.56339896", "0.5632402", "0.56323427", "0.5618313", "0.56156313", "0.56131685", "0.5611698", "0.55907243", "0.55887145", "0.55864507", "0.55831677", "0.5581019", "0.5577528", "0.55733794", "0.5569992", "0.5568292", "0.55650634", "0.55492496", "0.55428696", "0.55424035", "0.5540933", "0.55359226", "0.5534201", "0.5517856", "0.5515892", "0.551111", "0.5504538", "0.5499337", "0.549741", "0.549741", "0.54970425", "0.54960185", "0.5486249", "0.54834944", "0.54730725", "0.5468004", "0.54640466", "0.5459855", "0.54564744", "0.5455161", "0.5453019", "0.545078", "0.5448334", "0.54434294", "0.5437577", "0.5437121", "0.54306763", "0.5429962", "0.5429294", "0.541964", "0.5419125", "0.5417439", "0.5417439", "0.5415131" ]
0.5683974
38
Convert real world points with Doppler to rangeDoppler coordinates PARAMETERS
def convert_to_rd_points(data, world_camera):
    distances = np.sqrt((data[:, 0] - world_camera[0])**2 + \
                        (data[:, 1] - world_camera[1])**2)
    dopplers = data[:, 2]
    rd_points = [(distances[i], dopplers[i]) for i in range(data.shape[0])]
    return rd_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_points(self, divisions=100):", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def pyr_point_translator(x, y, org_l, dest_l):\n dest_x = (2.0 ** (org_l - dest_l)) * x\n dest_y = (2.0 ** (org_l - dest_l)) * y\n return np.array([dest_x, dest_y]).transpose()", "def point_list(self,res,llc,urc,direction):\n\t\tif direction == 2:\n\t\t\tZdist=urc[2]-llc[2]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,0,deltaZ*i]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 1:\n\t\t\tZdist=urc[1]-llc[1]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,deltaZ*i,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 0:\n\t\t\tZdist=urc[0]-llc[0]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([deltaZ*i,0,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]", "def random_gps_gen_from_range(s_lat,n_lat, e_lon, w_lon):\n #print(s_lat, n_lat, e_lon, w_lon)\n latitude = random.uniform(s_lat, n_lat)\n longitude = random.uniform(e_lon, w_lon)\n return latitude, longitude", "def CreateTargetGeoMap(latS, latN, lonW, lonE, latlen, lonlen):\n\n lat_grid = np.linspace(latS, 
latN, latlen)\n lon_grid = np.linspace(lonW, lonE, lonlen)\n\n return lat_grid,lon_grid", "def world_to_grid(mapdata, wp):\n WX = wp.x\n WY = wp.y\n resol = mapdata.info.resolution\n # -0.5 but coordinates to center\n gx = math.floor((WX - mapdata.info.origin.position.x) / resol - 0.5)\n gy = math.floor((WY - mapdata.info.origin.position.y) / resol - 0.5)\n return gx, gy", "def p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem):\n start_cell = (int((start_point[0] - top_left_cor[0]) / cellsize[0]),\n int((start_point[1] - top_left_cor[1]) / cellsize[1]))\n end_cell = (int((end_point[0] - top_left_cor[0]) / cellsize[0]),\n int((end_point[1] - top_left_cor[1]) / cellsize[1]))\n cells = misc.get_line(start_cell, end_cell) \n pnts = []\n elev = []\n \n dem_elv = dem[:,1]\n dem_indx = dem[:,2:4]\n\n for cell in cells:\n x = top_left_cor[0] + cell[0] * cellsize[0] + cellsize[0] / 2\n y = top_left_cor[1] + cell[1] * cellsize[1] + cellsize[1] / 2\n #xy_indx=[str(cell[0]),str(cell[1])]\n z_indx=np.logical_and(np.equal(dem_indx[:,0],cell[0]),np.equal(dem_indx[:,1],cell[1]))\n try:\n z=dem_elv[z_indx][0]\n except (np.sum(z_indx)>1):\n print(\"Oops! That was more than one indices in dem matching the query index (in getCellValue)\")\n #z_indx = [i for i,j in enumerate(dem_indx) if j == xy_indx]\n z = float(dem_elv[z_indx])\n pnts.append((x, y))\n elev.append(z)\n return pnts, elev", "def to_world(self, x, y, **kwargs):", "def convertFrom1Dto2D(coord, num_cols):\n y = int(np.floor(coord/num_cols))\n x = coord % num_cols\n return (y,x)", "def coordinates(self):", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def gen_coordinates(self):\n start = (0, self.dimensions.gradient_height / 2)\n end = self.dimensions.invert_point(start)\n radius = self.pythagorean(self.dimensions.gradient_center)\n coords = []\n theta = self.degrees + 180\n while theta <= 360:\n x, y = self.dimensions.gradient_center\n dx = self.get_change_in_x(x, radius, theta)\n dy = self.get_change_in_y(y, radius, theta)\n # process start/end point to fix to gradient\n start = self.adjust_to_rectangle((dx, dy), theta)\n start = Layer.add_gradient_offset(start)\n end = self.dimensions.invert_point(start)\n\n coords.append((start, end))\n theta += self.degrees\n\n self.coords = coords", "def LL2LocalRicom(pointsLL,lat0=0,long0=0,latoff=0,longoff=0): \n import math\n bigr = 6378136.0\n dlong = longoff-long0\n dlat = latoff-lat0\n clat0 = math.cos(lat0*math.pi/180.0)\n numPts = len(pointsLL)\n n = 0\n points = []\n while n<numPts:\n x = (pointsLL[n][0]+dlong)*bigr*(math.pi/180.0)*clat0\n y = (pointsLL[n][1]+dlat)*bigr*(math.pi/180.0)\n points.append([x,y])\n n+=1\n \n return points", "def det_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the 
center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def ricomLocal2LL(points,lat0=0,long0=0,latoff=0,longoff=0): \n import math\n bigr = 6378136.0\n dlong = longoff-long0\n dlat = latoff-lat0\n clat0 = math.cos(lat0*math.pi/180.0)\n numPts = len(points)\n n = 0\n pointsLL=[]\n while n<numPts:\n x = points[n][0]/(bigr*(math.pi/180.0)*clat0) - dlong\n y = points[n][1]/(bigr*(math.pi/180.0)) - dlat\n pointsLL.append([x,y])\n n+=1\n \n return pointsLL", "def light_source_directions():\n L = np.array([[-0.06059872, -0.44839055, 0.8917812],\n [-0.05939919, -0.33739538, 0.93948714],\n [-0.05710194, -0.21230722, 0.97553319],\n [-0.05360061, -0.07800089, 0.99551134],\n [-0.04919816, 0.05869781, 0.99706274],\n [-0.04399823, 0.19019233, 0.98076044],\n [-0.03839991, 0.31049925, 0.9497977],\n [-0.03280081, 0.41611025, 0.90872238],\n [-0.18449839, -0.43989616, 0.87889232],\n [-0.18870114, -0.32950199, 0.92510557],\n [-0.1901994, -0.20549935, 0.95999698],\n [-0.18849605, -0.07269848, 0.97937948],\n [-0.18329657, 0.06229884, 0.98108166],\n [-0.17500445, 0.19220488, 0.96562453],\n [-0.16449474, 0.31129005, 0.93597008],\n [-0.15270716, 0.4160195, 0.89644202],\n [-0.30139786, -0.42509698, 0.85349393],\n [-0.31020115, -0.31660118, 0.89640333],\n [-0.31489186, -0.19549495, 0.92877599],\n [-0.31450962, -0.06640203, 0.94692897],\n [-0.30880699, 0.06470146, 0.94892147],\n [-0.2981084, 0.19100538, 0.93522635],\n [-0.28359251, 0.30729189, 0.90837601],\n [-0.26670649, 0.41020998, 0.87212122],\n [-0.40709586, -0.40559588, 0.81839168],\n [-0.41919869, -0.29999906, 0.85689732],\n [-0.42618633, -0.18329412, 0.88587159],\n [-0.42691512, -0.05950211, 0.90233197],\n [-0.42090385, 0.0659006, 0.90470827],\n [-0.40860354, 0.18720162, 0.89330773],\n [-0.39141794, 0.29941372, 0.87013988],\n [-0.3707838, 0.39958255, 
0.83836338],\n [-0.499596, -0.38319693, 0.77689378],\n [-0.51360334, -0.28130183, 0.81060526],\n [-0.52190667, -0.16990217, 0.83591069],\n [-0.52326874, -0.05249686, 0.85054918],\n [-0.51720021, 0.06620003, 0.85330035],\n [-0.50428312, 0.18139393, 0.84427174],\n [-0.48561334, 0.28870793, 0.82512267],\n [-0.46289771, 0.38549809, 0.79819605],\n [-0.57853599, -0.35932235, 0.73224555],\n [-0.59329349, -0.26189713, 0.76119165],\n [-0.60202327, -0.15630604, 0.78303027],\n [-0.6037003, -0.04570002, 0.7959004],\n [-0.59781529, 0.06590169, 0.79892043],\n [-0.58486953, 0.17439091, 0.79215873],\n [-0.56588359, 0.27639198, 0.77677747],\n [-0.54241965, 0.36921337, 0.75462733],\n [0.05220076, -0.43870637, 0.89711304],\n [0.05199786, -0.33138635, 0.9420612],\n [0.05109826, -0.20999284, 0.97636672],\n [0.04919919, -0.07869871, 0.99568366],\n [0.04640163, 0.05630197, 0.99733494],\n [0.04279892, 0.18779527, 0.98127529],\n [0.03870043, 0.30950341, 0.95011048],\n [0.03440055, 0.41730662, 0.90811441],\n [0.17290651, -0.43181626, 0.88523333],\n [0.17839998, -0.32509996, 0.92869988],\n [0.18160174, -0.20480196, 0.96180921],\n [0.18200745, -0.07490306, 0.98044012],\n [0.17919505, 0.05849838, 0.98207285],\n [0.17329685, 0.18839658, 0.96668244],\n [0.1649036, 0.30880674, 0.93672045],\n [0.1549931, 0.41578148, 0.89616009],\n [0.28720483, -0.41910705, 0.8613145],\n [0.29740177, -0.31410186, 0.90160535],\n [0.30420604, -0.1965039, 0.9321185],\n [0.30640529, -0.07010121, 0.94931639],\n [0.30361153, 0.05950226, 0.95093613],\n [0.29588748, 0.18589214, 0.93696036],\n [0.28409783, 0.30349768, 0.90949304],\n [0.26939905, 0.40849857, 0.87209694],\n [0.39120402, -0.40190413, 0.8279085],\n [0.40481085, -0.29960803, 0.86392315],\n [0.41411685, -0.18590756, 0.89103626],\n [0.41769724, -0.06449957, 0.906294],\n [0.41498764, 0.05959822, 0.90787296],\n [0.40607977, 0.18089099, 0.89575537],\n [0.39179226, 0.29439419, 0.87168279],\n [0.37379609, 0.39649585, 0.83849122],\n [0.48278794, -0.38169046, 0.78818031],\n [0.49848546, -0.28279175, 0.8194761],\n [0.50918069, -0.1740934, 0.84286803],\n [0.51360856, -0.05870098, 0.85601427],\n [0.51097962, 0.05899765, 0.8575658],\n [0.50151639, 0.17420569, 0.84742769],\n [0.48600297, 0.28260173, 0.82700506],\n [0.46600106, 0.38110087, 0.79850181],\n [0.56150442, -0.35990283, 0.74510586],\n [0.57807114, -0.26498677, 0.77176147],\n [0.58933134, -0.1617086, 0.7915421],\n [0.59407609, -0.05289787, 0.80266769],\n [0.59157958, 0.057798, 0.80417224],\n [0.58198189, 0.16649482, 0.79597523],\n [0.56620006, 0.26940003, 0.77900008],\n [0.54551481, 0.36380988, 0.7550205]], dtype=float)\n return L", "def CreateReleasePoints(points_on_longitude, points_on_latitude, grids):\n \n ReleasePointsLon = []\n ReleasePointsLat = []\n \n GridsCW_array = np.asarray(grids[['min_lon', 'min_lat', 'max_lon', 'max_lat']])\n \n for i in range(len(GridsCW_array)):\n \n lon_space = np.linspace(GridsCW_array[i,0], GridsCW_array[i,2], num = points_on_longitude+2 )\n lat_space = np.linspace(GridsCW_array[i,1], GridsCW_array[i,3], num = points_on_latitude+2 )\n \n \n lon_space_cor = lon_space[1:-1]\n lat_space_cor = lat_space[1:-1]\n \n for j in lon_space_cor:\n for k in lat_space_cor:\n \n ReleasePointsLon.append(j)\n ReleasePointsLat.append(k)\n \n return ReleasePointsLon, ReleasePointsLat", "def translate(x_range=0, y_range=0):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((1, 0, x),\r\n (0, 1, y),\r\n (0, 0, 1)), dtype=np.float)", "def makeToCoordinates(fromCoords, Uframe, Vframe, scale):\n\n out = 
[]\n\n for e in fromCoords:\n x = e[0]\n y = e[1]\n toX = Uframe[x][y]\n toY = Vframe[x][y]\n out.append((int(round(x+toX*scale)),int(round(y+toY*scale))))\n\n return out", "def visualise_points_on_rd(rd_matrix, path, points, range_res, doppler_res):\n rd_img = SignalVisualizer(rd_matrix).get_image\n for point in points:\n range_coord = (point[0] / range_res).astype(int)\n doppler_coord = (point[1] / doppler_res).astype(int)\n if point[1] < 0:\n doppler_coord += int(rd_matrix.shape[1]/2 - 1)\n else:\n doppler_coord += int(rd_matrix.shape[1]/2)\n rd_img[range_coord*4:(range_coord*4+4),\n doppler_coord*4:(doppler_coord*4+4)] = [0., 0., 0.]\n plt.imsave(path, rd_img)\n plt.close()", "def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n = 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.", "def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def fd_system_uniform_grid(r):\n dr = r[1] - r[0]\n n = r.size\n\n d = np.full(n, -2/dr**2)\n d[-1] = 1\n\n l = np.zeros(n) # noqa:E741\n l[1:-1] = (1-0.5/np.arange(1, n-1))/dr**2\n\n u = np.zeros(n)\n u[0] = 2/dr**2\n u[1:-1] = (1+0.5/np.arange(1, n-1))/dr**2\n\n return l, d, u", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def 
get_correct_coords(start_x=0,viewing_distance=12.0,field_height=10,field_width=10,pixel_width=0.282,pixel_height=0.282,**config):\n \n x = (start_x + np.arange(np.ceil(-field_width/2.0),np.ceil(field_width/2.0),1))*pixel_width\n y = np.arange(np.ceil(-field_height/2.0),np.ceil(field_height/2.0),1)*pixel_height\n x,y = np.meshgrid(x,y)\n coords = np.vstack((x.ravel(),y.ravel())).T\n return coords", "def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return new_x, new_y, z", "def coords_to_residue(self, rnext: bool = ...) -> None:\n ...", "def GPSlatlon2XY(data_sheet, origin, theta):\n\n\tlon = np.array([[data_sheet.cell(row = i, column = 1).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat = np.array([[data_sheet.cell(row = i, column = 2).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_u = np.array([[data_sheet.cell(row = i, column = 5).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat_u = np.array([[data_sheet.cell(row = i, column = 6).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tUz = np.array([[data_sheet.cell(row = i, column = 4).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_in_km = (lon - origin[0])*111*np.cos(lat*np.pi/180)\n\tlat_in_km = (lat - origin[1])*111\n\t\n\trho_u = np.sqrt(np.power(lon_u,2) + np.power(lat_u,2))\n\ttheta_new_u = np.arctan2(lat_u,lon_u) - theta\n\n\trho = np.sqrt(np.power(lon_in_km,2) + np.power(lat_in_km,2))\n\ttheta_new = np.arctan2(lat_in_km,lon_in_km) - theta\n\n\tX, Y = rho*np.cos(theta_new), rho*np.sin(theta_new)\n\tUx, Uy = rho_u*np.cos(theta_new_u), rho_u*np.sin(theta_new_u)\n\n\treturn 1e3*X, 1e3*Y, 1e-3*Ux, 1e-3*Uy, 1e-3*Uz", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def parameter_range(p, v):\n if p.endswith('_pd_n'):\n return [0, 100]\n elif p.endswith('_pd_nsigma'):\n return [0, 5]\n elif p.endswith('_pd_type'):\n return v\n elif any(s in p for s in ('theta', 'phi', 'psi')):\n # orientation in [-180,180], orientation pd in [0,45]\n if p.endswith('_pd'):\n return [0, 45]\n else:\n return [-180, 180]\n elif 'sld' in p:\n return [-0.5, 10]\n elif p.endswith('_pd'):\n return [0, 1]\n elif p == 'background':\n return [0, 10]\n elif p == 'scale':\n return [0, 1e3]\n elif p == 'case_num':\n # RPA hack\n return [0, 10]\n elif v < 0:\n # Kxy parameters in rpa model can be negative\n return [2*v, -2*v]\n else:\n return [0, (2*v if v > 0 else 1)]", "def get_coordinates():\n\tallowed_range = [0,1,2]\n\trow = int(input(\"Enter row: \")) - 1\n\tcol = int(input(\"Enter column: \")) - 1", "def get_directions():\n return [(1, 0), (0, 1), (-1, 0), (0, -1)]", "def ED(X,Y):", "def gen_gps_to_coords(lat,lon,rows,cols,min_lat,max_lat,min_lon,max_lon):\n\n if (lat <= min_lat or lat >= max_lat or lon <= min_lon or lon >= max_lon):\n return (-1,-1)\n\n lat_step = abs(max_lat-min_lat)/rows\n lon_step = abs(max_lon-min_lon)/cols\n\n lat_spot = int((max_lat-lat)/lat_step)\n lon_spot = int((lon-min_lon)/lon_step)\n #print \"lat: %f lon: %f 
lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def converts_into_linear(Rr, Rl, L):\n a = Rr/2\n b = Rl/2\n c = Rr/L\n d = -Rl/L\n return [a, b, c, d]", "def lorentzian2d(p, x, y):\n #2012-02-04 11:38 IJMC: Created\n \n x = array(x, dtype=float).copy()\n y = array(y, dtype=float).copy()\n p = array(p).copy()\n\n if len(p)==5:\n p = concatenate((p, [0, 0]))\n elif len(p)==6:\n p = concatenate((p, [0]))\n\n z = ((x - p[3]) / p[1])**2 + ((y - p[4]) / p[2])**2 + p[5] * (x - p[3]) * (y - p[4])\n \n return p[6] + p[0]/(1. + z)", "def rpoints(self):\n return self.gmap.interp_gpos(self.points)", "def makeUpCoords(numb):\n # bounds of UK in EPSG:4326\n minLat=49.96\n maxLat=60.84\n minLon=-7.5\n maxLon=1.78\n # generate array of random numbers\n lon=np.random.rand(numb)*(maxLon-minLon)+minLon\n lat=np.random.rand(numb)*(maxLat-minLat)+minLat\n return(lon,lat)", "def generateCoord(self, resolutionList):\r\n locatorList = []\r\n\r\n print \"Scanning Eye\"\r\n self.getEyeCoord(locatorList, resolutionList[0])\r\n print \"Got Eye Coord\"\r\n print \"Scanning NoseBridge\"\r\n self.getNoseBridgeCoord(locatorList, resolutionList[5])\r\n print \"Got NoseBridge Coord\"\r\n print \"Scanning Nose\"\r\n self.getNoseCoord(locatorList, resolutionList[3])\r\n print \"Got Nose Coord\"\r\n print \"Scanning Mouth\"\r\n self.getMouthCoord(locatorList, resolutionList[1])\r\n print \"Got Mouth Coord\"\r\n print \"Scanning MouthLoop\"\r\n self.getMouthLoopCoord(locatorList, resolutionList[2])\r\n print \"Got MouthLoop Coord\"\r\n print \"Scanning Eyebrow\"\r\n self.getEyebrowCoord(locatorList, resolutionList[4])\r\n print \"Got Eyebrow Coord\"\r\n print \"Scanning Ear\"\r\n self.getEarCoord(locatorList)\r\n print \"Got Ear Coord\"\r\n print \"Scanning SideProfile\"\r\n self.getSideProfileCoord(locatorList)\r\n print \"Got SideProfile Coord\"\r\n\r\n print \"Scanning FrontProfile\"\r\n self.getFrontProfileCoord(locatorList)\r\n print \"Got FrontProfile Coord\"\r\n\r\n #Grouping locatorList\r\n cmds.select(locatorList)\r\n locatorGrp = cmds.group(name = \"LocatorCoordGrp#\")\r\n\r\n self.scaleToUnitVolume(locatorGrp)\r\n\r\n self.reverseName(locatorGrp)\r\n for locator in locatorList:\r\n if \"SideProfile_Coord\" in locator:\r\n cmds.move(0, locator, x=True, ws=True)\r\n return locatorGrp", "def get_first_quadrant(self):\n num_copies_x = ceil(self.max_x / self.room_x)\n num_copies_x = int(num_copies_x)\n num_copies_y = ceil(self.max_y / self.room_y)\n num_copies_y = int(num_copies_y)\n\n player_exp_x = []\n player_exp_y = []\n guard_exp_x = []\n guard_exp_y = []\n # Loop expands along the x axis\n for i in range(0, num_copies_x + 1, 1):\n temp_player_y_list = []\n temp_guard_y_list = []\n r_x = self.room_x * i\n\n if len(player_exp_x) == 0:\n n_p_p_x = self.player_x\n else:\n n_p_p_x = (r_x - player_exp_x[-1][0]) + r_x\n player_exp_x.append([n_p_p_x, self.player_y, 1])\n\n if len(guard_exp_x) == 0:\n n_g_p_x = self.guard_x\n else:\n n_g_p_x = (r_x - guard_exp_x[-1][0]) + r_x\n guard_exp_x.append([n_g_p_x, self.guard_y, 7])\n\n # Loop expands along the x axis\n for j in range(1, num_copies_y + 1, 1):\n r_y = self.room_y * j\n if len(temp_guard_y_list) == 0:\n n_g_p_y = (r_y - self.guard_y) + r_y\n temp_guard_y_list.append(n_g_p_y)\n else:\n n_g_p_y = (r_y - temp_guard_y_list[-1]) + r_y\n temp_guard_y_list.append(n_g_p_y)\n guard_exp_y.append([n_g_p_x, n_g_p_y, 7])\n\n if len(temp_player_y_list) == 0:\n n_p_p_y = (r_y - self.player_y) + r_y\n temp_player_y_list.append(n_p_p_y)\n 
else:\n n_p_p_y = (r_y - temp_player_y_list[-1]) + r_y\n temp_player_y_list.append(n_p_p_y)\n player_exp_y.append([n_p_p_x, n_p_p_y, 1])\n\n return player_exp_x + guard_exp_x + player_exp_y + guard_exp_y", "def origin() -> Tuple[float, float]:\n return (DIMENSION[0] / 2, DIMENSION[1] / 2)", "def convert(coordinates):\n center = np.mean(coordinates, axis=0, dtype=np.float32)\n x = np.subtract(np.array(coordinates, dtype=np.float32), center)\n rho, phi = cart2pol(x[:, 0], x[:, 1])\n result = np.swapaxes(np.array([rho, phi], dtype=np.float32), 0, 1)\n\n # normalize rho values to range[0-1]\n result[:, 0] = normalize(result[:, 0].reshape(1, -1), norm='max')\n return result", "def dd2dm(lat,lon):\r\n lat_d = int(abs(lat)) #calculate latitude degrees\r\n lat_m = (abs(lat) - lat_d) * 60. #calculate latitude minutes\r\n\r\n lon_d = int(abs(lon))\r\n lon_m = (abs(lon) - lon_d) * 60.\r\n \r\n la=lat_d*100.+lat_m\r\n lo=lon_d*100.+lon_m\r\n return la,lo", "def GetParametricCoords(self):\n ...", "def set_maprange(xmin, ymin, xmax, ymax, epsg_in='epsg:4326'):\n outProj = pyproj.Proj(init='epsg:3857')\n inProj = pyproj.Proj(init=epsg_in)\n xmin,ymin = 75, -55\n xmax,ymax = 175, -5\n x1,y1 = pyproj.transform(inProj,outProj,xmin,ymin)\n x2,y2 = pyproj.transform(inProj,outProj,xmax,ymax)\n return x1, y1, x2, y2", "def CreateTargetGeoField(nbtimestep,latlen,lonlen):\n\n pres_grid = np.zeros((nbtimestep, latlen, lonlen))\n u_grid = np.zeros((nbtimestep, latlen, lonlen))\n v_grid = np.zeros((nbtimestep, latlen, lonlen))\n\n return pres_grid,u_grid,v_grid", "def doppler(self, lmdas):\n lmda_0 = 656.3\n velocities = []\n for lmda in lmdas:\n velocities.append((lmda - lmda_0)*constants.c/lmda_0)\n return np.array(velocities)", "def undirected_new_robot_coordinates(old_x, old_y):\n # even if we allow for wide range of movement, filter still works well\n d = random.normalvariate(100,1)\n theta = random.uniform(0, 2*math.pi)\n new_x = old_x + d * math.cos(theta)\n new_y = old_y + d * math.sin(theta)\n return new_x, new_y", "def get_2d_cartesian_grid(num_pts_1d, ranges):\n # from math_tools_cpp import cartesian_product_double as cartesian_product\n from PyDakota.math_tools import cartesian_product\n x1 = np.linspace(ranges[0], ranges[1], num_pts_1d)\n x2 = np.linspace(ranges[2], ranges[3], num_pts_1d)\n abscissa_1d = []\n abscissa_1d.append(x1)\n abscissa_1d.append(x2)\n grid = cartesian_product(abscissa_1d, 1)\n return grid", "def initialCoordinates():\r\n return (-250,-250)", "def _convertCdfPointsToDistr(self,pts):\n try:\n return self.ppf(pts.real)\n except TypeError:\n return list(self.ppf(x) for x in pts)", "def world_to_det(self, ra, dec):\n x, y = ra, dec\n return x, y", "def Dist(p1,p2):\n x1, y1 = p1\n x2, y2 = p2\n return (((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)))**0.5", "def waypts2setpts(P, params):\n\tV = params.drone_vel # [m/s]\n\tfreq = params.ViconRate; dt = 1./freq\n\tdx = V * dt\n\ttraj_global = np.array(P[-1])\n\tfor i in range(len(P)-1, 0, -1):\n\t\tA = P[i]\n\t\tB = P[i-1]\n\n\t\tn = (B-A) / norm(B-A)\n\t\tdelta = n * dx\n\t\tN = int( norm(B-A) / norm(delta) )\n\t\tsp = A\n\t\ttraj_global = np.vstack([traj_global, sp])\n\t\tfor i in range(N):\n\t\t\tsp += delta\n\t\t\ttraj_global = np.vstack([traj_global, sp])\n\t\tsp = B\n\t\ttraj_global = np.vstack([traj_global, sp])\n\n\treturn traj_global", "def defineCoords(dimensions, steps):\n ### NOT CURRENTLY USED\n \n print(\"1D\")\n xCoords = np.arange(-X/2, X/2+dx, dx) # 1D \n fxCoords = np.arange(0, X/dx+dx)\n fxCoords = fxCoords - fxCoords[-1]/2 # 
Shift everything over so the center of array is at f = 0\n realCoords = xCoords\n fourierCoords = fxCoords\n realSpace = np.zeros(Nx+1)\n xRealSpace = np.zeros(Nx)\n #realSpace = np.zeros(Nx) #1D\n \n fourierSpace = np.zeros_like(realSpace, complex)\n\n return realCoords, fourierCoords, realSpace, fourierSpace", "def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new", "def to_grid(point: np.array) -> np.array:\n return np.array((2.5, 2.5)) + point * 5", "def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def _world_to_sensor(cords, sensor):\n\n sensor_world_matrix = ClientSideBoundingBoxes.get_matrix(sensor.get_transform())\n world_sensor_matrix = np.linalg.inv(sensor_world_matrix)\n sensor_cords = np.dot(world_sensor_matrix, cords)\n return sensor_cords", "def pto_depth_map(self, velo_points,\n H=64, W=512, C=5, dtheta=np.radians(0.4), dphi=np.radians(90./512.0)):\n\n x, y, z, i = velo_points[:, 0], velo_points[:, 1], velo_points[:, 2], velo_points[:, 3]\n d = np.sqrt(x ** 2 + y ** 2 + z**2)\n r = np.sqrt(x ** 2 + y ** 2)\n d[d==0] = 0.000001\n r[r==0] = 0.000001\n phi = np.radians(45.) - np.arcsin(y/r)\n phi_ = (phi/dphi).astype(int)\n phi_[phi_<0] = 0\n phi_[phi_>=512] = 511\n\n # print(np.min(phi_))\n # print(np.max(phi_))\n #\n # print z\n # print np.radians(2.)\n # print np.arcsin(z/d)\n theta = np.radians(2.) 
- np.arcsin(z/d)\n # print theta\n theta_ = (theta/dtheta).astype(int)\n # print theta_\n theta_[theta_<0] = 0\n theta_[theta_>=64] = 63\n #print theta,phi,theta_.shape,phi_.shape\n # print(np.min((phi/dphi)),np.max((phi/dphi)))\n #np.savetxt('./dump/'+'phi'+\"dump.txt\",(phi_).astype(np.float32), fmt=\"%f\")\n #np.savetxt('./dump/'+'phi_'+\"dump.txt\",(phi/dphi).astype(np.float32), fmt=\"%f\")\n # print(np.min(theta_))\n # print(np.max(theta_))\n\n depth_map = np.zeros((H, W, C))\n # 5 channels according to paper\n if C == 5:\n depth_map[theta_, phi_, 0] = x\n depth_map[theta_, phi_, 1] = y\n depth_map[theta_, phi_, 2] = z\n depth_map[theta_, phi_, 3] = i\n depth_map[theta_, phi_, 4] = d\n else:\n depth_map[theta_, phi_, 0] = i\n return depth_map", "def g2Dto1D(g2D, L):\n\n g2D = np.array(g2D)\n dL = np.array(L)/np.array(g2D.shape) # boxes separation in each direction\n r_max = np.min(L)/2 # maximum radius to be calculated in number of boxes\n\n g1D_dic = DictList() # hash table of radii and values at radii\n\n for i in range(g2D.shape[0]):\n for j in range(g2D.shape[1]):\n radius = np.sqrt(np.sum((np.array((i, j))*dL)**2)) # radius corresponding to coordinates [i, j], [-i, j], [i, -j], [-i, -j]\n if radius <= r_max:\n g1D_dic[radius] += [g2D[i, j], g2D[-i, j], g2D[i, -j],\n g2D[-i, -j]]\n\n return np.array(list(map(\n lambda radius: [radius, np.mean(g1D_dic[radius])],\n sorted(g1D_dic))))", "def convert(points):\n distance = []\n for i in points:\n x = int(i[0])\n y = int(i[1])\n distance.append([x,y])\n return distance", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def test_custom_lon_lat_range():\n import astropy.units as u\n lrange = [-50,-40]\n brange = [-10,10]\n spiral_arm = survey.get_spiral_slice(track = \"Carina_far\", \n lrange = lrange, \n brange = brange)\n spiral_arm2 = survey.get_spiral_slice(track = \"CrF\", \n lrange = lrange*u.deg, \n brange = brange*u.deg)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def generate_regular_grid_point_coords(R, side_size, device):\n aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)\n r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)\n return r.view(1, -1, 2).expand(R, -1, -1)", "def onReceivedPositionAndRange(self, droneName: str, position: Vec2, yaw: float, ranges: List[int]):\n\n realYaw = yaw\n points: List[Vec2] = []\n\n position['x'] -= self.initialDronePos[droneName]['x']\n position['y'] -= self.initialDronePos[droneName]['y']\n\n if self.mission['type'] == 'argos':\n xtemp = position['x']\n position['x'] = position['y']\n position['y'] = xtemp\n elif self.mission['type'] == 'crazyradio':\n xtemp = position['x']\n position['x'] = - position['y']\n position['y'] = - xtemp\n realYaw = yaw + math.pi / 4\n\n xPos = position['x'] + self.offsetDronePos[droneName]['x']\n yPos = position['y'] + self.offsetDronePos[droneName]['y']\n\n i = 0\n for r in ranges:\n if r > self.maxRange:\n i += 1\n continue\n point = Vec2(\n x=round(r * self.RANGE_SCALE * math.cos(realYaw + i * math.pi / 2)\n * (-2 * (self.mission['type'] == 'argos') + 1) + xPos, 4),\n y=round(r * self.RANGE_SCALE * math.sin(realYaw + i * math.pi / 2)\n * (-2 * (self.mission['type'] != 'argos') + 
1) + yPos, 4)\n )\n if self.checkPointValidity((point['x'], point['y'])):\n points.append(point)\n i += 1\n self.handlePositionAndBorders(\n droneName, Vec2(x=xPos, y=yPos), points)", "def to_raw(self):\n return [self.poses[i].to_absolute_xyz_rxryrz() + self.wrenches[i].parameters() for i in range(len(self.poses))]", "def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):\n V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1\n V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2\n V_t1 = gamma * sigma * (y + ll * x) / r1\n V_t2 = gamma * sigma * (y + ll * x) / r2\n return [V_r1, V_r2, V_t1, V_t2]", "def ret2dva(xret, yret):\n return xret / 280.0, yret / 280.0", "def coords_to_gps(self,coords):\n return ((self.max_lat - (self.lat_step * (0.5+coords[0]))),(self.min_lon + (self.lon_step * (0.5+coords[1]))))", "def create_range_map(points_xyz: NDArrayFloat) -> NDArrayByte:\n range = points_xyz[..., 2]\n range = np.round(range).astype(int)\n color = plt.get_cmap(\"turbo\")(np.arange(0, range.max() + 1))\n color = color[range]\n range_cmap: NDArrayByte = (color * 255.0).astype(np.uint8)\n return range_cmap", "def find_single_fit_range(lorentz_params):\n f0 = lorentz_params[1]\n FWHM = lorentz_params[2]\n return (f0 - 4 * FWHM, f0 - 2 * FWHM, f0 + 2 * FWHM, f0 + 4 * FWHM)", "def transform_domains(points, from_domain, to_domain, use_mp=False, dps=None):\n with mp.workdps(dps or mp.dps):\n fl = mp.mpf if use_mp else float\n a, b = map(fl, to_domain)\n c, d = map(fl, from_domain)\n scale = (b-a)/(d-c)\n trans = a - (b-a)*c/(d-c)\n return [min(b, max(a, scale*p + trans)) for p in points]", "def get_floor_reference_points():\n global Z # This declaration is needed to modify the global variable Z\n global floor_reference_points # Maybe erase.\n global floor_reference_orientations # Maybe erase.\n \n #Z = (-0.04311285564353425 -0.04512672573083166 -0.04080078888404003 -0.046071914959185875)/4\n #Z= -0.04721129960500225\n Z = -0.15113003072395247\n print(Z)\n# [0.5264201148167275, 0.40034933311487086, -0.027560670871152958]\n# Point 1 = [0.5264201148167275, 0.40034933311487086, -0.027560670871152958]\n# Move the LEFT arm to point 2 and press enter.\n# Move the LEFT arm to point 3 and press enter.\n# Point 3 = [0.8164126163781988, 0.00011724257622775782, -0.006060458646583389]\n# Move the LEFT arm to point 4 and press enter.\n# Point 4 = [0.5774338486223564, -0.02912627450728407, -0.02923769860966796]\n# Point 1 = [0.45835412247904794, 0.4167330917312844, -0.11362745036843477]\n# Move the LEFT arm to point 2 and press enter.\n# Point 2 = [0.7046556740624649, 0.45390428836232344, -0.11322759071560898]\n# Move the LEFT arm to point 3 and press enter.\n# Point 3 = [0.7778487250094798, 0.07406413897305184, -0.11181591166991744]\n# Move the LEFT arm to point 4 and press enter.\n# Point 4 = [0.5418466718761972, 0.034360381218309734, -0.11464607923115094]\n\n #return [[p1[0],p1[1]], [p2[0],p2[1]], [p3[0],p3[1]], [p4[0],p4[1]]]\n \n #print p4\n filename = \"/home/sampath/midca/examples/_gazebo_baxter/calibration.txt\"\n f = open(filename, 'r')\n p1 = f.readline().split(' ')\n p2 = f.readline().split(' ')\n p3 = f.readline().split(' ')\n p4 = f.readline().split(' ')\n \n p1[0] = float(p1[0])\n p1[1] = float(p1[1])\n p2[0] = float(p2[0])\n p2[1] = float(p2[1])\n p3[0] = float(p3[0])\n p3[1] = float(p3[1])\n p4[0] = float(p4[0])\n p4[1] = float(p4[1])\n \n return [[p1[0], p1[1]],\n [p2[0], p2[1]],\n [p3[0], p3[1]],\n [p4[0], p4[1]]]", "def _generate_coordinates(self):\n a0 = +0.2969\n a1 = -0.1260\n a2 = 
-0.3516\n a3 = +0.2843\n a4 = -0.1036 # zero thickness TE\n\n x = np.linspace(0.0, 1.0, num=self.n_points)\n\n if len(self.digits) == 4:\n # Returns n+1 points in [0 1] for the given 4-digits NACA string\n m = float(self.digits[0]) / 100.0\n p = float(self.digits[1]) / 10.0\n t = float(self.digits[2:]) / 100.0\n\n # half-thickness distribution\n yt = 5 * t * (a0 * np.sqrt(x) + a1 * x + a2 * np.power(x, 2) +\n a3 * np.power(x, 3) + a4 * np.power(x, 4))\n\n if p == 0:\n # Symmetric foil\n self.xup_coordinates = np.linspace(0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yt\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.ydown_coordinates = -yt\n else:\n # Cambered foil\n xc1 = np.asarray([xx for xx in x if xx <= p])\n xc2 = np.asarray([xx for xx in x if xx > p])\n yc1 = m / np.power(p, 2) * xc1 * (2 * p - xc1)\n yc2 = m / np.power(1 - p, 2) * (1 - 2 * p + xc2) * (1 - xc2)\n # Y-coordinates of camber line\n yc = np.append(yc1, yc2)\n\n if self.cosine_spacing:\n # points are generated according to cosine distribution of\n # the X-coordinates of the chord\n dyc1_dx = m / np.power(p, 2) * (2 * p - 2 * xc1)\n dyc2_dx = m / np.power(1 - p, 2) * (2 * p - 2 * xc2)\n dyc_dx = np.append(dyc1_dx, dyc2_dx)\n theta = np.arctan(dyc_dx)\n self.xup_coordinates = x - yt * np.sin(theta)\n self.yup_coordinates = yc + yt * np.cos(theta)\n self.xdown_coordinates = x + yt * np.sin(theta)\n self.ydown_coordinates = yc - yt * np.cos(theta)\n else:\n # Linear spacing distribution of the foil coordinates\n self.xup_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yc + yt\n self.ydown_coordinates = yc - yt\n\n elif len(self.digits) == 5:\n # Returns n+1 points in [0 1] for the given 5-digits NACA string\n cld = float(self.digits[0]) * 0.15\n p = 5.0 * float(self.digits[1]) / 100.0\n s = float(self.digits[2])\n t = float(self.digits[3:]) / 100.0\n\n # half-thickness distribution\n yt = 5 * t * (a0 * np.sqrt(x) + a1 * x + a2 * np.power(x, 2) +\n a3 * np.power(x, 3) + a4 * np.power(x, 4))\n\n if s == 1:\n # Relfex camber\n P = np.array([0.1, 0.15, 0.2, 0.25])\n M = np.array([0.13, 0.2170, 0.318, 0.441])\n K = np.array([51.99, 15.793, 6.520, 3.191])\n elif s == 0:\n # Standard camber\n P = np.array([0.05, 0.1, 0.15, 0.2, 0.25])\n M = np.array([0.0580, 0.1260, 0.2025, 0.2900, 0.3910])\n K = np.array([361.4, 51.64, 15.957, 6.643, 3.230])\n else:\n raise ValueError(\n 'For NACA \"LPSTT\" the value of \"S\" can be either 0 or 1.')\n\n if p == 0:\n # Symmetric foil\n self.xup_coordinates = np.linspace(0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yt\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.ydown_coordinates = -yt\n else:\n # Cambered foil\n spl_m = splrep(P, M)\n spl_k = splrep(M, K)\n m = splev(p, spl_m)\n k1 = splev(m, spl_k)\n xc1 = np.asarray([xx for xx in x if xx <= m])\n xc2 = np.asarray([xx for xx in x if xx > m])\n yc1 = k1 / 6.0 * (np.power(xc1, 3) - 3 * m * np.power(xc1, 2) +\n np.power(m, 2) * (3 - m) * xc1)\n yc2 = k1 / 6.0 * np.power(m, 3) * (1 - xc2)\n yc = np.append(yc1, yc2)\n\n if self.cosine_spacing:\n # points are generated according to cosine distribution of\n # the X-coordinates of the chord\n zc = cld / 0.3 * yc\n dyc1_dx = 1.0 / 6.0 * k1 * (\n 3 * np.power(xc1, 2) - 6 * m * xc1 + np.power(m, 2) *\n (3 - m))\n dyc2_dx = np.tile(-1.0 / 6.0 * k1 * np.power(m, 3),\n len(xc2))\n dyc_dx = np.append(dyc1_dx, dyc2_dx)\n theta = 
np.arctan(dyc_dx)\n self.xup_coordinates = x - yt * np.sin(theta)\n self.yup_coordinates = zc + yt * np.cos(theta)\n self.xdown_coordinates = x + yt * np.sin(theta)\n self.ydown_coordinates = zc - yt * np.cos(theta)\n else:\n # Linear spacing distribution of the foil coordinates\n self.xup_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yc + yt\n self.ydown_coordinates = yc - yt\n\n else:\n raise Exception", "def CDFconvertToDistr(self,pts):\n return self._convertCdfPointsToDistr(self._convertStdPointsToCdf(pts))", "def to_polar(center_coords, neighbors_coords):\n return cart2pol((neighbors_coords - center_coords)[:, 0],\n (neighbors_coords - center_coords)[:, 1])", "def rapoint(rpoint):\r\n return [rpoint[0]*gv[\"globalscale\"]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0]),\r\n rpoint[1]*gv[\"globalscale\"]*(gv[\"fixedUR\"][1]-gv[\"fixedLL\"][1])]", "def convert_coordinate_system_2d(x, z):\n\n return x, -z", "def local2global(local_coord, start, end, strand):\n\n # swap if strands disagree\n if strand == 1:\n return local_coord + start\n else:\n return end - local_coord", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def normalize_wrt_y(self):\n\n x_min = min(self.x)\n y_min = min(self.y)\n y_max = max(self.y)\n\n y_range = y_max - y_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(y_range)\n y = y / float(y_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 
31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def get_point_coords_wrt_image(boxes, point_coords):\n # with tf.variable_scope(\"get_point_coords_wrt_image\", reuse=False):\n boxes = tf.stop_gradient(boxes)\n point_coords = tf.stop_gradient(point_coords)\n h = boxes[:, None, 2] - boxes[:, None, 0]\n w = boxes[:, None, 3] - boxes[:, None, 1]\n y1 = boxes[:, None, 0]\n x1 = boxes[:, None, 1]\n scale = tf.stack([h, w], axis=-1)\n trans = tf.stack([y1, x1], axis=-1)\n point_coords = point_coords * scale\n point_coords = point_coords + trans\n return point_coords", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def ggpl_stair_landings(dx,dy,dz):\n dati=rizerAndTread(dy,dz)\n nGradini = dati[2]\n alzata=dati[1]\n pedata=dati[0]\n yPianerottolo = dy/3.0\n lGradino = dx/2\n pianerottolo=CUBOID([dx,yPianerottolo,alzata])\n halfSteps=0\n dispari=0\n if nGradini%2==0:\n halfSteps=nGradini/2\n else:\n halfSteps=(nGradini-1)/2\n dispari=1\n scala=[]\n diagonale=[]\n dist=[-(dx/2),dx/2]\n dist2=[dx/2]\n distDiag=QUOTE(dist)\n scala.append(T([1])(lGradino))\n a=0\n p=pedata\n diagonale.append([a,p])\n diagonale.append([alzata,p])\n diagonale.append([alzata,2*p])\n d=MKPOL([diagonale,[[1,2,3]],None])\n d=PROD([distDiag,d])\n d=STRUCT([R([2,3])(PI/2),d])\n d=STRUCT([R([1,2])(PI),d])\n d=STRUCT([T([1])(dx*3/2),d])\n for i in range (1,int(halfSteps)):\n p=p+pedata\n a=a+alzata\n scala.append(CUBOID([lGradino,pedata,alzata]))\n scala.append(T([1,2,3])([0,pedata,alzata]))\n d=STRUCT([d,T([2,3])([pedata,alzata]),d])\n \n scala.append(CUBOID([lGradino,pedata,alzata]))\n scala.append(T([1,2,3])([-lGradino,pedata,alzata]))\n scala.append(pianerottolo)\n \n scala1=STRUCT([STRUCT(scala),d])\n \n if dispari:\n halfSteps2=halfSteps+1\n\n scalaMirror=[]\n diagonaleMirror=[]\n dist=[-(dx/2),dx/2]\n dist2=[dx/2]\n distDiag=QUOTE(dist)\n scalaMirror.append(T([1])(lGradino))\n a=0\n p=pedata\n diagonaleMirror.append([a,p])\n diagonaleMirror.append([alzata,p])\n diagonaleMirror.append([alzata,2*p])\n d2=MKPOL([diagonale,[[1,2,3]],None])\n d2=PROD([distDiag,d2])\n d2=STRUCT([R([2,3])(PI/2),d2])\n d2=STRUCT([R([1,2])(PI),d2])\n d2=STRUCT([T([1,2,3])([dx*3/2,-pedata,-alzata]),d2])\n \n for i in range (1,int(halfSteps)):\n d2=STRUCT([d2,T([2,3])([pedata,alzata]),d2])\n p=p+pedata\n a=a+alzata\n scalaMirror.append(CUBOID([lGradino,pedata,alzata]))\n scalaMirror.append(T([1,2,3])([0,pedata,alzata]))\n\n p=p+pedata\n a=a+alzata\n \n scalaMirror.append(CUBOID([lGradino,pedata,alzata]))\n \n \n scala2=STRUCT([STRUCT(scalaMirror),d2])\n 
scala2=STRUCT([R([1,2])(PI),scala2])\n scala2=STRUCT([T([1,2,3])([lGradino*2,pedata*(halfSteps),alzata*(halfSteps+1)]),scala2])\n \n scala2=STRUCT([scala1,scala2])\n a=SIZE([1,2,3])(BOX([1,2,3])(scala2))\n \n sx=dx/a[0] \n sy=dy/a[1]\n sz=dz/a[2]\n\n scala2=STRUCT([COLOR(color(255,255,255)),S([1,2,3])([sx,sy,sz]),scala2])\n return scala2", "def remap(x: float, min_x: float, max_x: float, min_y: float, max_y: float) -> float:\n return min_y + (max_y - min_y) * ((x - min_x) / (max_x - min_x))", "def desiredENU2localSp(self, xd, yd, zd):\n\n\t\tif self.INDOOR:\n\t\t\tself.mavros_sp.position.x = xd\n\t\t\tself.mavros_sp.position.y = yd\n\t\t\tself.mavros_sp.position.z = zd\n\t\telse:\n\n\t\t\tlat, lon, alt = self.desiredENU2geo(xd, yd, zd)\n\t\t\t# converts to differences in robot's ENU\n\t\t\tx,y,z = pm.geodetic2enu(lat, lon, alt, self.curr_lat, self.curr_lon, self.gpsAlt)\n\n\t\t\t# Add the differences to robot's current location\n\t\t\tself.mavros_sp.position.x = self.local_pose.pose.position.x + x\n\t\t\tself.mavros_sp.position.y = self.local_pose.pose.position.y + y\n\t\t\tself.mavros_sp.position.z = zd + self.GND_ALT", "def g2l(global_x, global_y, hero):\n \n l_hero_x = (visible_squares[0]-1)/2 #8\n l_hero_y = (visible_squares[1]-1)/2 #8\n \n \n local_x = global_x - hero.x + l_hero_x\n local_y = global_y - hero.y + l_hero_y\n \n return local_x, local_y", "def predict_coords(self):\r\n\r\n if self.direction == 1:\r\n return [self.coords[0] + 1, self.coords[1]]\r\n if self.direction == 2:\r\n return [self.coords[0] - 1, self.coords[1]]\r\n if self.direction == 3:\r\n return [self.coords[0], self.coords[1] + 1]\r\n if self.direction == 4:\r\n return [self.coords[0], self.coords[1] - 1]", "def deg2rad_inplace(a):" ]
[ "0.6075092", "0.5820298", "0.57956177", "0.5740052", "0.5732382", "0.5556223", "0.55544716", "0.55006737", "0.5466339", "0.54140425", "0.5391508", "0.53908247", "0.53622574", "0.53531116", "0.53329784", "0.5326264", "0.5315539", "0.528949", "0.52882594", "0.5276278", "0.5251392", "0.52058196", "0.51968086", "0.51949346", "0.51901746", "0.5188935", "0.51879674", "0.51879674", "0.5182274", "0.5180567", "0.5172197", "0.5155267", "0.51371455", "0.51143134", "0.5111798", "0.51092654", "0.51017123", "0.5096856", "0.5087323", "0.50870883", "0.5076313", "0.5075994", "0.5075087", "0.50692993", "0.5061499", "0.50564086", "0.5042197", "0.50344676", "0.5033513", "0.50327", "0.50265574", "0.5022785", "0.5017934", "0.50167376", "0.50164634", "0.5016341", "0.50147855", "0.50095284", "0.50058085", "0.4999621", "0.49934146", "0.49927592", "0.4991944", "0.49890566", "0.49889147", "0.4986666", "0.49810514", "0.4972615", "0.49664056", "0.49590182", "0.4954804", "0.49501657", "0.4949416", "0.4947683", "0.4940624", "0.49391478", "0.4924331", "0.4923333", "0.4917383", "0.4913592", "0.49082738", "0.4906711", "0.49036083", "0.49017146", "0.49014834", "0.4898165", "0.48962635", "0.4895051", "0.48943406", "0.48926938", "0.48885655", "0.48858207", "0.48803338", "0.48784262", "0.4878394", "0.48782793", "0.48765528", "0.48728105", "0.48719704", "0.48700878" ]
0.56694
5
Visualise and record DoADoppler point clouds
def visualise_doa(doa_points, doa_labels, path, centroids=None):
    if isinstance(centroids, np.ndarray) and len(centroids.shape) == 1:
        centroids = centroids.reshape(1, -1)
    n_clusters = np.unique(doa_labels).shape[0]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
    colors = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'blueviolet', 'brown', 'burlywood', 'khaki', 'indigo', 'peru', 'pink', 'rosybrown', 'teal', 'seagreen'])
    for k, col in zip(range(n_clusters), colors):
        indexes = doa_labels == k
        ax.scatter(doa_points[indexes, 0], doa_points[indexes, 1], doa_points[indexes, 2], 'ro', c=col)
    if isinstance(centroids, np.ndarray):
        ax.scatter(centroids[:, 0], centroids[:, 1], centroids[:, 2], 'ro', c='black')
    ax.set_xlabel('x (m)')
    ax.set_ylabel('y (m)')
    ax.set_zlabel('Doppler-Velocity (m/s)')
    ax.set_title('3D representation of the DoA points with centroid')
    plt.savefig(path)
    plt.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish_point_cloud(self):\n all_points = [np.zeros((0, 2), np.float32)]\n all_keys = []\n for key in range(len(self.keyframes)):\n pose = self.keyframes[key].pose\n transf_points = self.keyframes[key].transf_points\n all_points.append(transf_points)\n all_keys.append(key * np.ones((len(transf_points), 1)))\n\n all_points = np.concatenate(all_points)\n all_keys = np.concatenate(all_keys)\n sampled_points, sampled_keys = pcl.downsample(\n all_points, all_keys, self.point_resolution\n )\n sampled_xyzi = np.c_[sampled_points, np.zeros_like(sampled_keys), sampled_keys]\n if len(sampled_xyzi) == 0:\n return\n\n if self.save_fig:\n plt.figure()\n plt.scatter(\n sampled_xyzi[:, 0], sampled_xyzi[:, 1], c=sampled_xyzi[:, 3], s=1\n )\n plt.axis(\"equal\")\n plt.gca().invert_yaxis()\n plt.savefig(\"step-{}-map.png\".format(self.current_key - 1), dpi=100)\n plt.close(\"all\")\n\n cloud_msg = n2r(sampled_xyzi, \"PointCloudXYZI\")\n cloud_msg.header.stamp = self.current_keyframe.time\n cloud_msg.header.frame_id = \"map\"\n self.cloud_pub.publish(cloud_msg)", "def render_point_cloud(frame_id):\n point_cloud_world_coordinates = get_point_cloud_world_coordinates(frame_id)\n # pptk\n v = pptk.viewer(point_cloud_world_coordinates)\n v.set(point_size=0.0001)\n v.color_map('cool', scale=[0, 5])", "def visualize_point_cloud(points, colors=None, normals=None,\n show_frame=False, frame_size=1.0, frame_origin=(0, 0, 0)):\n pc = np2pcd(points, colors, normals)\n geometries = [pc]\n if show_frame:\n coord_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=frame_size, origin=frame_origin)\n geometries.append(coord_frame)\n o3d.visualization.draw_geometries(geometries)", "def visualize(self):\n self.octree.updateInnerOccupancy()\n print(\"Start Octomap Visualization\")\n\n # define parameters\n data = imgviz.data.arc2017()\n camera_info = data['camera_info']\n K = np.array(camera_info['K']).reshape(3, 3)\n width=camera_info['width']\n height=camera_info['height']\n\n # get free and occupied grid\n occupied, _ = self.octree.extractPointCloud()\n #frontier = self.gen_frontier()\n \n print(\"load point cloud\")\n window = pyglet.window.Window(\n width=int(1280), height=int(960)\n )\n\n @window.event\n def on_key_press(symbol, modifiers):\n if modifiers == 0:\n if symbol == pyglet.window.key.Q:\n window.on_close()\n\n gui = glooey.Gui(window)\n hbox = glooey.HBox()\n hbox.set_padding(5)\n\n camera = trimesh.scene.Camera(\n resolution=(width, height), focal=(K[0, 0], K[1, 1])\n )\n\n # initial camera pose\n camera_transform = np.array(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, -5],\n [0.0, 0.0, 0.0, 1.0],\n ],\n )\n\n \n\n occupied_geom = trimesh.voxel.ops.multibox(\n occupied, pitch=self.resolution, colors=[0.0, 0.0, 0.0, 0.5]\n )\n\n # frontier_geom = trimesh.voxel.ops.multibox(\n # frontier, pitch=self.resolution, colors=[1.0, 0, 0, 0.5]\n # )\n scene = trimesh.Scene(camera=camera, geometry=[occupied_geom])#, frontier_geom])\n scene.camera_transform = camera_transform\n hbox.add(self.labeled_scene_widget(scene, label='octomap'))\n\n\n gui.add(hbox)\n pyglet.app.run()", "def make_point_cloud(self):\r\n\r\n self.pointCloud = VtkPointCloud()\r\n for k in range(np.size(self.pos, 0)):\r\n self.pointCloud.addPoint(self.pos[k, :])\r\n\r\n # Renderer\r\n renderer = vtk.vtkRenderer()\r\n renderer.AddActor(self.pointCloud.vtkActor)\r\n renderer.SetBackground(.2, .3, .4)\r\n renderer.SetBackground(0.0, 0.0, 0.0)\r\n renderer.ResetCamera()\r\n\r\n # Render Window\r\n renderWindow = 
vtk.vtkRenderWindow()\r\n renderWindow.AddRenderer(renderer)\r\n\r\n # Interactor\r\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\r\n renderWindowInteractor.SetRenderWindow(renderWindow)\r\n\r\n # Begin Interaction\r\n renderWindow.Render()\r\n renderWindow.SetWindowName(\"XYZ Data Viewer: \")\r\n renderWindowInteractor.Start()", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def draw_pointcloud(ax, example):\n points = example['points'].cpu().detach().numpy()\n points_num = len(points)\n xs = np.empty([points_num])\n ys = np.empty([points_num])\n zs = np.empty([points_num])\n intensity = np.empty([len(points)])\n for j, point in enumerate(points):\n xs[j] = point[1]\n ys[j] = point[2]\n zs[j] = point[3]\n intensity[j] = point[4]\n\n intensity = intensity\n ax.scatter3D(xs, ys, zs, c=intensity, marker='.', s=0.3, cmap=plt.get_cmap('jet'))", "def generate_pointcloud(rgb_file, mask_file,depth_file,ply_file):\n rgb = Image.open(rgb_file)\n # depth = Image.open(depth_file)\n depth = Image.open(depth_file).convert('I')\n mask = Image.open(mask_file).convert('I')\n\n # if rgb.size != depth.size:\n # raise Exception(\"Color and depth image do not have the same resolution.\")\n # if rgb.mode != \"RGB\":\n # raise Exception(\"Color image is not in RGB format\")\n # if depth.mode != \"I\":\n # raise Exception(\"Depth image is not in intensity format\")\n\n\n points = [] \n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u,v))\n # Z = depth.getpixel((u,v)) / scalingFactor\n # if Z==0: continue\n # X = (u - centerX) * Z / focalLength\n # Y = (v - centerY) * Z / focalLength\n if (mask.getpixel((u,v))<55):\n Z = depth.getpixel((u, v))*.22 \n if Z == 0: continue\n Y = .22 * v\n X = .22 * u\n points.append(\"%f %f %f %d %d %d 0\\n\"%(X,Y,Z,color[0],color[1],color[2]))\n file = open(ply_file,\"w\")\n file.write('''ply\nformat ascii 1.0\nelement vertex %d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty uchar alpha\nend_header\n%s\n'''%(len(points),\"\".join(points)))\n file.close()", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), 
(x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def create_pointcloud(pts):\n depths = PointCloud()\n depths.header = std_msgs.msg.Header()\n depths.header.stamp = rospy.Time.now()\n depths.header.frame_id = \"view_zero\"\n depths.points = [None] * len(pts)\n for p in xrange(len(pts)):\n #Giving point the same orientation as the robot\n y = pts[p,0]\n z = - pts[p,1] #y in images is down\n x = pts[p,2]\n depths.points[p] = Point(x, y, z)\n return depths", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def point_cloud(self):\n\t\tgen = self.loop(point_cloud=True)\n\t\tpoint_cloud = next(gen)\n\t\treturn point_cloud", "def debug_filter_points(self, points):\n cloud_msg = PointCloud2()\n cloud_msg.header.frame_id = \"map\"\n cloud_msg.header.stamp = rospy.Time.now() \n xyz = [[p.pose.position.x, p.pose.position.y, p.pose.position.z] for p in points] \n point_cloud = pc2.create_cloud_xyz32(cloud_msg.header, xyz)\n self._points_publisher.publish(point_cloud)", "def point_from_rays(self):\n print \"generating the 3d point from given clicked points\"\n \n #gather cams and points clicked \n uvs = []\n cams = []\n for iFrame in self.frames:\n if iFrame.lastClick : \n uv = numpy.multiply(iFrame.lastClick,self.reduceFactor)\n uvs.append(uv)\n cam = load_perspective_camera(self.camList[iFrame.currImg])\n cams.append(cam)\n point = get_3d_from_cams(cams, uvs)\n self.point3d = point;\n self.pointLabel.set(\"3d Point: \" + str(self.point3d))\n\n # project 3d point into each image, and gather intensities \n values = []\n ims = []\n for idx, img in enumerate(self.imgList):\n cam = load_perspective_camera(self.camList[idx])\n imgPoint = project_point(cam, point[0], point[1], point[2])\n imgPoint = numpy.divide(imgPoint, self.reduceFactor)\n self.allUVs.append(imgPoint)\n \n #grab float intensity value at this point \n imgView,ni,nj = load_image(img)\n val = pixel(imgView, imgPoint)\n if val > 0.0:\n values.append(val)\n ims.append(idx)\n \n #cleanup\n remove_from_db([imgView, cam])\n \n\n #write mean/std of intensities \n self.meanLabel.set(\"Mean: \" + str(numpy.mean(values)) )\n self.stdLabel.set(\"Std Dev: \" + str(numpy.std(values)) )\n #plot the intensities by image number \n self.f.clf();\n self.a = self.f.add_subplot(311)\n 
self.a.set_xlabel(\"img #\")\n self.a.set_ylabel(\"intensity\")\n self.a.plot(ims, values)\n #plot the histogram of intensities by image number \n pdf, bins, patches = plt.hist(values)\n self.b = self.f.add_subplot(313)\n self.b.set_xlabel(\"bin val\")\n self.b.set_ylabel(\"freq\")\n self.b.hist(values, 15, normed=1, facecolor=\"green\" )\n self.canvas.show();", "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "def save_3d_render(\r\n self, points: List[np.ndarray], colors: List[np.ndarray]\r\n ) -> None:\r\n pcd = o3d.geometry.PointCloud()\r\n pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))\r\n pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))\r\n if self.debug:\r\n o3d.visualization.draw_geometries([pcd])\r\n if not self.debug:\r\n o3d.io.write_point_cloud(f\"results/{self.filename[:-4]}.ply\", pcd)", "def render_point_cloud(self, point_cloud, extrinsics=Pose(), color=GRAY):\n\n combined_transform = self._bev_rotation * extrinsics\n\n pointcloud_in_bev = combined_transform * point_cloud\n point_cloud2d = pointcloud_in_bev[:, :2]\n\n point_cloud2d[:, 0] = (self._center_pixel[0] + point_cloud2d[:, 0] * self._pixels_per_meter)\n point_cloud2d[:, 1] = (self._center_pixel[1] + point_cloud2d[:, 1] * self._pixels_per_meter)\n\n H, W = self.data.shape[:2]\n uv = point_cloud2d.astype(np.int32)\n in_view = np.logical_and.reduce([\n (point_cloud2d >= 0).all(axis=1),\n point_cloud2d[:, 0] < W,\n point_cloud2d[:, 1] < H,\n ])\n uv = uv[in_view]\n self.data[uv[:, 1], uv[:, 0], :] = color", "def plotrgcloud(self):\n print self.kpunten\n for i in range(len(self.kpunten[0])):\n self.writetext('sen ='+ self.kpunten[0][i][0], (0.65,0.85), axnum = 0, hor = None ,ver = None , rot = None ,fs =14 , transform = self.fig.axes[0].transAxes)\n if i == len(self.kpunten[0]) -1 :\n end = None\n else:\n end = self.kpunten[0][i+1][1] + 1\n print end\n self.plotrgwrap( self.rgindex,2*self.reader.npair+self.rgindex,'real part of rgvars (a.u)' , 'imaginary part of rgvars (a.u.)', tit ='RG vars g = %f all states'%(self.chardata) , begin = self.kpunten[0][i][1] , stop = end , name = 'cpcloud'+ self.kpunten[0][i][0] , filenum = 0)", "def open3dpaint(nppoints, color_map = 'jet', reduce_for_vis = False, voxel_size = 0.1, pointsize = 0.1):\n assert (type(nppoints) == pclpy.pcl.PointCloud.PointXYZRGB) or (type(nppoints) == pclpy.pcl.PointCloud.PointXYZ) or (type(nppoints) == np.ndarray) or (type(nppoints) is list) or (type(nppoints) is tuple), 'Not valid point_cloud'\n \n if (type(nppoints) is not list) & (type(nppoints) is not tuple):\n nppoints = [nppoints]\n try:\n visualizer = open3d.visualization.Visualizer()\n visualizer.create_window()\n options = visualizer.get_render_option()\n options.background_color = np.asarray([0, 0, 0])\n options.point_size = pointsize\n\n if len(nppoints) > 1:\n for n,i in enumerate(nppoints):\n workpoints = i\n if (type(workpoints) == pclpy.pcl.PointCloud.PointXYZRGB) or (type(workpoints) == pclpy.pcl.PointCloud.PointXYZ):\n workpoints = workpoints.xyz\n\n if reduce_for_vis:\n workpoints = seg_tree.voxelize(workpoints,voxel_size)\n\n points = convertcloud(workpoints)\n color_coef = n/len(nppoints)/2 + n%2*.5\n if type(color_map) == np.ndarray:\n color = color_map\n elif color_map == 'jet':\n color=cm.jet(color_coef)[:3]\n else:\n color=cm.Set1(color_coef)[:3]\n points.colors = open3d.utility.Vector3dVector(np.ones_like(workpoints)*color)\n #points.colors = 
open3d.utility.Vector3dVector(color)\n visualizer.add_geometry(points)\n else:\n workpoints = nppoints[0]\n if (type(workpoints) == pclpy.pcl.PointCloud.PointXYZRGB) or (type(workpoints) == pclpy.pcl.PointCloud.PointXYZ):\n workpoints = workpoints.xyz\n \n if reduce_for_vis:\n workpoints = seg_tree.voxelize(workpoints,voxel_size)\n points = convertcloud(workpoints)\n visualizer.add_geometry(points)\n visualizer.run()\n visualizer.destroy_window()\n \n except Exception as e:\n print(type(e))\n print(e.args)\n print(e)\n visualizer.destroy_window()", "def __show_source(self):\n pcd = o3d.io.read_point_cloud(\n self.source_cloud\n )\n if np.asarray(pcd.points).shape[0] != 0:\n pcd.paint_uniform_color([0, 1, 0])\n pcd.estimate_normals()\n self.source_point_cloud_view.load_cloud(pcd)\n try:\n self.source_point_cloud_view.show_window()\n except RuntimeError:\n pass\n else:\n QtWidgets.QMessageBox.warning(self, \"Error\",\n f\"Source point cloud is no longer available\"\n )\n self.source_cloud = \"\"\n self.__update_clickability()\n self.__save_context()", "def draw_points(self, pic_path, points_data):\n # Pupil Finding here\n pupils = get_eye_locations_in_image(pic_path)\n img = cv2.imread(pic_path)\n frame_number = int(re.findall(r'\\d+', pic_path.split('/')[-1])[0])\n dets = detector(img)\n shape = None\n height, width, channels = img.shape\n\n for k, d in enumerate(dets):\n shape = predictor(img, d)\n\n if(not shape):\n return\n\n pointList = []\n c = 0\n for b in range(68):\n # sanitizing input points\n point = Point(shape.part(b).x, shape.part(b).y)\n points_data[c] = [point.x, point.y]\n c = c + 1\n # some points might be out of bound\n # so, move them to the closest boundary\n if(point.x < 0):\n point.x = 0\n elif(point.x >= width):\n point.x = width - 1\n if(point.y < 0):\n point.y = 0\n elif(point.y >= height):\n point.y = height - 1\n\n pointList.append(point)\n\n roll = findRoll(pointList)\n #print(\"roll is \" + str(roll) + ' angles')\n yaw = findYaw(pointList)\n #print(\"yaw is \" + str(yaw) + ' angles')\n pitch = findPitch(pointList)\n #print(\"pitch is \" + str(pitch) + ' angles')\n self.data[frame_number] = [roll, yaw, pitch]\n counter = 0\n for point in pointList:\n cv2.circle(img, (point.x, point.y), ImageProcessor.POINT_SIZE, ImageProcessor.POINT_COLOR, -1)\n counter = counter + 1\n\n self.draw_triangles(img, pointList)\n \n for pupil in pupils:\n cv2.circle(img, (pupil.left.x, pupil.left.y), 5, (0,0,255), -1)\n cv2.circle(img, (pupil.right.x, pupil.right.y), 5, (0,0,255), -1)\n points_data[-1] = [pupil.left.x, pupil.left.y]\n points_data[-2] = [pupil.right.x, pupil.right.y]\n #print(pupil.left.x, \", \", pupil.left.y)\n #print(pupil.right.x, \", \", pupil.right.y)\n\n cv2.imwrite(pic_path, img)", "def show_pointclouds(points, colors, text=[], title=\"Default\", png_path=\"\", interactive=True, orientation='horizontal'):\n\n # make sure pointclouds is a list\n assert isinstance(points, type([])), \\\n \"Pointclouds argument must be a list\"\n\n # make sure colors is a list\n assert isinstance(colors, type([])), \\\n \"Colors argument must be a list\"\n\n # make sure number of pointclouds and colors are the same\n assert len(points) == len(colors), \\\n \"Number of pointclouds (%d) is different then number of colors (%d)\" % (len(points), len(colors))\n\n while len(text) < len(points):\n text.append(\"\")\n\n # Number of pointclouds to be displayed in this window\n num_pointclouds = len(points)\n\n point_size = 10\n pointclouds = [VtkPointCloud(point_size) for _ in 
range(num_pointclouds)]\n renderers = [vtk.vtkRenderer() for _ in range(num_pointclouds)]\n\n height = 1.0 / max(num_pointclouds, 1)\n viewports = [(i*height, (i+1)*height) for i in range(num_pointclouds)]\n #print(viewports)\n\n # iterate over all point clouds\n for i, pc in enumerate(points):\n pc = pc.squeeze()\n co = colors[i].squeeze()\n assert pc.shape[0] == co.shape[0], \\\n \"expected same number of points (%d) then colors (%d), cloud index = %d\" % (pc.shape[0], co.shape[0], i)\n assert pc.shape[1] == 3, \"expected points to be N x 3, got N x %d\" % pc.shape[1]\n assert co.shape[1] == 3, \"expected colors to be N x 3, got N x %d\" % co.shape[1]\n\n # for each point cloud iterate over all points\n for j in range(pc.shape[0]):\n point = pc[j, :]\n color = co[j, :]\n pointclouds[i].add_point(point, color)\n\n renderers[i].AddActor(pointclouds[i].vtkActor)\n # renderers[i].AddActor(vtk.vtkAxesActor())\n renderers[i].SetBackground(1.0, 1.0, 1.0)\n if orientation == 'horizontal':\n print(viewports[i][0])\n renderers[i].SetViewport(viewports[i][0], 0.0, viewports[i][1], 1.0)\n elif orientation == 'vertical':\n renderers[i].SetViewport(0.0, viewports[i][0], 1.0, viewports[i][1])\n else:\n raise Exception('Not a valid orientation!')\n renderers[i].ResetCamera()\n\n # Add circle to first render\n renderers[0].AddActor(getActorCircle())\n renderers[0].AddActor(getActorCircle(50, 49, color=(0, 1, 0)))\n\n # Text actors\n text_actors = [vtk.vtkTextActor() for _ in text]\n for i, ta in enumerate(text_actors):\n if orientation == 'horizontal':\n ta.SetInput(' ' + text[i])\n elif orientation == 'vertical':\n ta.SetInput(text[i] + '\\n\\n\\n\\n\\n\\n')\n else:\n raise Exception('Not a valid orientation!')\n txtprop = ta.GetTextProperty()\n txtprop.SetFontFamilyToArial()\n txtprop.SetFontSize(0)\n txtprop.SetColor(0, 0, 0)\n # txtprop.SetJustificationToCentered()\n # ta.SetDisplayPosition(500, 10)\n # ta.SetAlignmentPoint()\n renderers[i].AddActor(ta)\n\n # Render Window\n render_window = vtk.vtkRenderWindow()\n for renderer in renderers:\n render_window.AddRenderer(renderer)\n\n render_window_interactor = vtk.vtkRenderWindowInteractor()\n render_window_interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n render_window_interactor.SetRenderWindow(render_window)\n\n [center_x, center_y, center_z] = np.mean(points[0].squeeze(), axis=0)\n camera = vtk.vtkCamera()\n # d = 10\n # camera.SetViewUp(0, -1, 0)\n\n # camera.SetPosition(center_x + d, center_y + d, center_z + d / 2)\n # camera.SetFocalPoint(center_x, center_y, center_z)\n # camera.SetFocalPoint(0, 0, 0)\n\n camera.SetViewUp(0, 0, 1)\n if orientation == 'horizontal':\n camera.SetPosition(3, -10, 2)\n camera.SetFocalPoint(3, 1.5, 1.5)\n elif orientation == 'vertical':\n camera.SetPosition(1.5, -6, 2)\n camera.SetFocalPoint(1.5, 1.5, 1.5)\n else:\n raise Exception('Not a valid orientation!')\n\n camera.SetClippingRange(0.002, 1000)\n for renderer in renderers:\n renderer.SetActiveCamera(camera)\n\n # Begin Interaction\n render_window.Render()\n render_window.SetWindowName(title)\n if orientation == 'horizontal':\n render_window.SetSize(1940, 720)\n elif orientation == 'vertical':\n render_window.SetSize(600, 1388)\n else:\n raise Exception('Not a valid orientation!')\n\n if interactive:\n render_window_interactor.Start()\n\n if png_path:\n # screenshot code:\n w2if = vtk.vtkWindowToImageFilter()\n w2if.SetInput(render_window)\n w2if.Update()\n\n writer = vtk.vtkPNGWriter()\n writer.SetFileName(png_path)\n 
writer.SetInputConnection(w2if.GetOutputPort())\n writer.Write()", "def showEntireDataset(wl_listG, wl_listV, tsvd_graphlet_vectors, kpca_graphlet_gram, tsvd_shortestpath_vectors,\n kpca_shortestpath_gram, classes):\n for i in range(1, 8):\n if (i == 6):\n data_tsvd = tsvd_graphlet_vectors\n data_kpca = kpca_graphlet_gram\n elif (i == 7):\n data_tsvd = tsvd_shortestpath_vectors\n data_kpca = kpca_shortestpath_gram\n else:\n data_tsvd = wl_listV[i - 1]\n data_kpca = wl_listG[i - 1]\n fig = plt.figure(figsize=(15, 15))\n if (i == 6):\n fig.suptitle('Graphlet', fontsize=25)\n elif (i == 7):\n fig.suptitle('Shortest Path', fontsize=25)\n else:\n fig.suptitle(f'Weisfeiler-Lehman {i}', fontsize=25)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223, projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax1.title.set_text('2D TruncatedSVD')\n ax2.title.set_text('2D KernelPCA')\n ax3.title.set_text('3D TruncatedSVD')\n ax4.title.set_text('3D KernelPCA')\n ax1.scatter(data_tsvd[:, 0], data_tsvd[:, 1], c=classes)\n ax2.scatter(data_kpca[:, 0], data_kpca[:, 1], c=classes)\n ax3.scatter3D(data_tsvd[:, 0], data_tsvd[:, 1], data_tsvd[:, 2], c=classes)\n ax4.scatter3D(data_kpca[:, 0], data_kpca[:, 1], data_kpca[:, 2], c=classes)\n plt.show()\n print(\"________________________________________________________________________________________\")\n print()", "def view_point_cloud_model_and_affordance(number_of_object_per_category=5):\n list_pc_paths = [f for f in glob.glob('./dataset/*.npy', recursive=True)]\n set_objects = set([os.path.basename(pc_path).split('_')[0] for pc_path in list_pc_paths])\n\n for obj in set_objects:\n try:\n # load point cloud models\n pc_models = np.load('./dataset/{}_point_cloud_models.npy'.format(obj))[\n :number_of_object_per_category]\n # load point cloud grasp affordance\n pc_affordance = np.load('./dataset/{}_point_cloud_grasp_affordance.npy'.format(obj))[\n :number_of_object_per_category]\n\n # visualization\n for i, m in enumerate(pc_models):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n list_model_x, list_model_y, list_model_z = [], [], []\n list_affordance_x, list_affordance_y, list_affordance_z = [], [], []\n\n for x in range(32):\n for y in range(32):\n for z in range(32):\n if pc_affordance[i, x, y, z] == 1:\n list_affordance_x.append(x)\n list_affordance_y.append(y)\n list_affordance_z.append(z)\n elif m[x, y, z] == 1:\n list_model_x.append(x)\n list_model_y.append(y)\n list_model_z.append(z)\n\n ax.scatter(list_model_x, list_model_y, list_model_z, c='#0c457d')\n ax.scatter(list_affordance_x, list_affordance_y, list_affordance_z, c='#e8702a', alpha=0.35)\n ax.set_xlim(0, 32)\n ax.set_ylim(0, 32)\n ax.set_zlim(0, 32)\n plt.show()\n\n except FileNotFoundError:\n print('Some point cloud npy files are not found.')\n continue", "def showPoints(surface, points):\n for point in points:\n point.show(surface)", "def draw_known_points(self):\n if self.tracking:\n if self.current_tracked_point == None:\n return\n p = self.current_tracked_point\n x, y = (int(u) for u in p)\n cv2.circle(self.altered_image, (x, y), 10, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.putText(self.altered_image, f'az:{x:.3f} alt:{y:.3f}', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))\n self.statusBar().showMessage(f'az: {x:.3f} alt: {y:.3f}')\n else:\n for p in self.known_image_points:\n x, y = (int(u) for u in p)\n\n cv2.circle(self.altered_image, (x, y), 5, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.putText(self.altered_image, f'x:{x} y:{y}', (x, y), 
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))", "def __show_target(self):\n pcd = o3d.io.read_point_cloud(\n self.target_cloud\n )\n if np.asarray(pcd.points).shape[0] != 0:\n pcd.paint_uniform_color([0, 0, 1])\n pcd.estimate_normals()\n self.target_point_cloud_view.load_cloud(pcd)\n try:\n self.target_point_cloud_view.show_window()\n except RuntimeError:\n pass\n else:\n QtWidgets.QMessageBox.warning(self, \"Error\",\n f\"Target point cloud is no longer available\"\n )\n self.target_cloud = \"\"\n self.__update_clickability()\n self.__save_context()", "def plot(self):\n # Find only unmasked data :\n # xyz, sData, sColor, _ = self._select_unmasked()\n xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n self.mesh = visu.Markers(name='Sources')\n self.mesh.set_data(xyz, edge_color=self.edgecolor, face_color=sColor,\n size=sData, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n self.mesh.set_gl_state('translucent')", "def dump(self, data_points):\n print(data_points)", "def setup(config, session, pts_all):\n optic = config['Optic']\n general = config['General']\n\n numFrames_total_rough = session['frames_total']\n numVids = session['num_vids']\n spacing = optic['spacing']\n\n bbox_subframe_displacement = pts_all['bbox_subframe_displacement']\n pts_displacement = pts_all['pts_displacement']\n pts_x_displacement = pts_all['pts_x_displacement']\n pts_y_displacement = pts_all['pts_y_displacement']\n mask_frame_displacement = pts_all['mask_frame_displacement']\n\n ## Make point cloud\n pts_spaced = np.ones((np.int64(bbox_subframe_displacement[3] * bbox_subframe_displacement[2] / spacing),\n 2)) * np.nan ## preallocation\n cc = 0 ## set idx counter\n\n # make spaced out points\n for ii in range(len(pts_x_displacement)):\n if (pts_x_displacement[ii] % spacing == 0) and (pts_y_displacement[ii] % spacing == 0):\n pts_spaced[cc, 0] = pts_x_displacement[ii]\n pts_spaced[cc, 1] = pts_y_displacement[ii]\n cc = cc + 1\n\n pts_spaced = np.expand_dims(pts_spaced, 1).astype('single')\n pts_spaced = np.delete(pts_spaced, np.where(np.isnan(pts_spaced[:, 0, 0])), axis=0)\n print(f'number of points: {pts_spaced.shape[0]}')\n\n ## Define random colors for points in cloud\n color_tuples = list(np.arange(len(pts_x_displacement)))\n for ii in range(len(pts_x_displacement)):\n color_tuples[ii] = (np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255)\n\n ## Preallocate output variables\n\n # I add a bunch of NaNs to the end because the openCV estimate is usually less than the actual number of frames\n displacements = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n positions_recursive = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n\n ## Preset point tracking variables\n pointInds_toUse = copy.deepcopy(pts_spaced)\n pointInds_tracked = pointInds_toUse ## set the first frame to have point locations be positions in the point cloud\n pointInds_tracked_tuple = list(np.arange(pointInds_toUse.shape[0]))\n\n return pointInds_toUse, pointInds_tracked, pointInds_tracked_tuple, displacements, pts_spaced, color_tuples , positions_recursive", "def plot_data(self):", "def show_orbits(points, nb_pairs, title, save_path = None):\n\n fig = plt.figure(figsize = (6, 6))\n plt.title(title)\n plt.scatter(points[:, 0], points[:, 1], c = 'b', s = 5, label = \"point cloud\")\n for i in 
range(len(nb_pairs)):\n plt.scatter(nb_pairs[i][0][:, 0], nb_pairs[i][0][:, 1], s = 5, label = \"%d_1\" % i)\n plt.scatter(nb_pairs[i][1][:, 0], nb_pairs[i][1][:, 1], s = 5, label = \"%d_2\" % i)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.axis('equal')\n plt.grid()\n plt.legend()\n plt.show()\n \n if type(save_path) != type(None):\n fig.savefig(save_path)", "def generate_explore_views(self):\n views = []\n if self._safety_surface[\"type\"] == \"circle\":\n # Generate points evently distributed on the circle\n center = self._safety_surface[\"center\"]\n center = Vector3r(center[0], center[1], center[2])\n x0 = center.x_val\n y0 = center.y_val\n z0 = center.z_val\n radius = self._safety_surface[\"radius\"]\n TOTAL_NUM = self._config[\"point_num\"]\n ROUND_NUM = self._config.get(\"round_num\", 1)\n delta_theta = 2 * math.pi / (TOTAL_NUM / ROUND_NUM)\n\n for i in range(TOTAL_NUM):\n theta = delta_theta * i\n x = x0 + radius * math.sin(theta)\n y = y0 + radius * math.cos(theta)\n pitch = -45\n views.append(\n {\n \"position\": Vector3r(x, y, z0),\n \"yaw\": -1 * (0.5 * math.pi + theta),\n \"pitch\": pitch,\n }\n )\n elif self._safety_surface[\"type\"] == \"cylinder\":\n # Generate points spiral the cylinder\n top_center = self._safety_surface[\"top_center\"]\n top_center = Vector3r(top_center[0], top_center[1], top_center[2])\n x0 = top_center.x_val\n y0 = top_center.y_val\n bottom = self._safety_surface.get(\"bottom\", 0)\n height = top_center.z_val - bottom\n radius = self._safety_surface[\"radius\"]\n TOTAL_NUM = self._config[\"point_num\"]\n ROUND_NUM = self._config.get(\"round_num\", 1)\n START_PITCH = self._config.get(\"start_pitch\", -45)\n END_PITCH = self._config.get(\"end_pitch\", 45)\n delta_theta = 2 * math.pi / (TOTAL_NUM / ROUND_NUM)\n delta_height = height / (TOTAL_NUM - 1)\n delta_pitch = (END_PITCH - START_PITCH) / TOTAL_NUM\n for i in range(TOTAL_NUM):\n theta = delta_theta * i\n x = x0 + radius * math.sin(theta)\n y = y0 + radius * math.cos(theta)\n z = bottom + i * delta_height\n pitch = START_PITCH + i * delta_pitch\n views.append(\n {\n \"position\": Vector3r(x, y, z),\n \"yaw\": -1 * (0.5 * math.pi + theta),\n \"pitch\": pitch / 180 * math.pi,\n }\n )\n else:\n print(\n \"OfflineNavigator: unknown type of safety_surface (%s)\"\n % self._safety_surface[\"type\"]\n )\n\n return views", "def plot_wordcloud(word_list, file_name=\"\"):\n plt.figure(figsize=plt.figaspect(0.8), dpi=100)\n all_words = ' '.join([text for text in word_list])\n wordcloud = WordCloud(width=800, height=600, random_state=21, max_font_size=110).generate(all_words)\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis('off')\n if file_name:\n plt.savefig(file_name, bbox_inches='tight')\n plt.show()\n return", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def visualize(dataset, aff, ip, port):\n import neuroglancer\n\n snemi3d_dir = snemi3d.folder()\n neuroglancer.set_static_content_source(url='https://neuroglancer-demo.appspot.com')\n neuroglancer.set_server_bind_address(bind_address=ip, bind_port=port)\n viewer = neuroglancer.Viewer(voxel_size=[6, 6, 30])\n if aff:\n import augmentation\n augmentation.maybe_create_affinities(dataset)\n add_affinities(snemi3d_dir, dataset+'-affinities', viewer)\n else:\n add_file(snemi3d_dir, dataset+'-input', viewer)\n add_file(snemi3d_dir, dataset+'-labels', viewer)\n\n print('open your brower 
at:')\n print(viewer.__str__().replace('172.17.0.2', '54.166.106.209')) # Replace the second argument with your own server's ip address\n webbrowser.open(viewer.__str__())\n print(\"press any key to exit\")\n raw_input()", "def convertcloud(points):\n pcd = open3d.geometry.PointCloud()\n pcd.points = open3d.utility.Vector3dVector(points)\n return pcd", "def display(self):\n scatter_plot(self.points, self.hull_points, self.color, self.title)", "def visualise(self):\n\n scores, education = self.get_data()\n self.write_data(scores, education)\n\n return True", "def view(config_file):\n import open3d as o3d\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n for scene in scenes:\n # if scene['scene_name'] != \"Scene_004\":\n # continue\n scene_data = get_data_from_scene(scene)\n logger.info(\"Visualizing - %s\", scene['scene_name'])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d']))\n o3d.visualization.draw_geometries_with_editing([pcd])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d_segmented']))\n o3d.visualization.draw_geometries([pcd])", "def stamp_collection(d_data='',**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # Because some of the 25Mpc galaxies are HUGE\n if p.gal_index == 'all':\n gal_indices = np.arange(GR.N_gal)\n gal_indices = gal_indices[GR.R_max < 200.]\n print(len(gal_indices))\n else: \n gal_indices = p.gal_index\n\n\n print('TEST!!')\n gal_indices = [91,93,124,117,121,130,135,136,139,143,146,147,152,154,164,166,167,168,171,173,174,175,186,189,192,203,211,213,214,226,222,223,226,228,233,236]\n\n N_stamps_1 = 8\n N_stamps_2 = 6\n\n #zoom = 1.5\n\n counter = N_stamps_1 * N_stamps_2\n fignum = 0\n plotnum = 0\n\n for gal_index in gal_indices:\n\n if counter == N_stamps_1 * N_stamps_2:\n print('Creating new figure')\n fig, axes = plt.subplots(figsize=(20,20))\n # fig,(axs,cax) = plt.subplots(ncols=2,figsize = (20,30),\\\n # gridspec_kw={\"width_ratios\":[1, 0.05]})\n gs1 = mpl.gridspec.GridSpec(N_stamps_1, N_stamps_2,left=0.05,top=0.95,bottom=0.05,right=0.82)\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n #simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n simgas = gal_ob.particle_data.get_dataframe('simgas',d_data=d_data)\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop,pix_size_kpc=p.pix_size_kpc,scale=1.5)\n\n # Plot\n ax1 = plt.subplot(gs1[N_stamps_1*N_stamps_2 - counter])\n ax1.set_facecolor(\"black\")\n Rmax = max_scale/2\n # ax1 = axs[5*8 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n Rmax = p.R_max\n ax1.set_xlim([-Rmax,Rmax])\n ax1.set_ylim([-Rmax,Rmax])\n ax1.text(0.05,0.05,'G%i' % gal_index,\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.prop == 'm':\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.65,'# gas particles: %i' % (len(simgas)),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n ax1.set_xticklabels([])\n 
ax1.set_yticklabels([])\n ax1.set_aspect('equal')\n\n counter -= 1\n plotnum += 1\n\n print(gal_index, counter)\n if counter == 0 or gal_index == gal_indices[-1]:\n gs1.update(wspace=0.0, hspace=0.0)\n axes.set_xlabel('x [kpc]'); axes.set_ylabel('y [kpc]')\n cbar_ax = fig.add_axes([0.85, 0.06, 0.02, 0.85])\n cbar = fig.colorbar(im, cax=cbar_ax)\n cbar.set_label(label=lab,size=20)\n cbar.ax.tick_params(labelsize=14)\n print('Saving in ' + p.d_plot + 'sim_data/%s%s_map_%s_%s_gals_%i.png' % (p.sim_name,p.sim_run,p.prop,p.z1,fignum))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/%s%s_map_%s_%s_gals_%i.png' % (p.sim_name,p.sim_run,p.prop,p.z1,fignum), format='png', dpi=250, facecolor='w')\n counter = N_stamps_1 * N_stamps_2\n fignum += 1\n plt.close('all')", "def plot_pade_figure(self):\n data_analysis = DatabaseData(dataframe=self.plot_data)\n print (data_analysis.dataframe.columns)\n data_analysis.run_pade_through_R(rscript='birch',get_inits_ev=True)\n data_analysis.create_precisions()\n data_analysis.extract_pade_curve()\n x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \\\n data_analysis.create_pade_bokeh_compat(properties=self.properties)\n print (type(self.properties), self.properties)\n if self.properties == 'B':\n ext = data_analysis.Bp\n print ('HERE AT PROPERTIES', ext, type(ext))\n elif self.properties == 'BP':\n ext = data_analysis.BPp\n elif self.properties == 'E0':\n ext = data_analysis.E0p\n elif self.properties == 'V0':\n ext = data_analysis.V0p\n p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n x_axis_type=\"log\", x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )\n p.xaxis.axis_label = 'K-points per atom'\n p.line(x_pade_kpts, y_pade, color='red')\n p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)\n p.multi_line(xs_err, ys_err, color='black')\n if self.properties == 'B':\n p.yaxis.axis_label = 'Bulk Modulus B (GPa)'\n elif self.properties == 'dB':\n p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'\n elif self.properties == 'E0':\n p.yaxis.axis_label = 'DFT Energy (eV/atom)'\n elif self.properties == 'V0':\n p.yaxis.axis_label = 'Volume (A^3/atom)'\n\n return p", "def visualize_output(self, keyframe_index):\n\n self.vis_counter += 1\n keyframe_image = self.images[keyframe_index]\n keyframe_depth = self.depths[keyframe_index]\n keyframe_pose = self.poses[keyframe_index]\n\n feed_dict = {\n self.images_placeholder: keyframe_image[np.newaxis],\n self.depths_placeholder: keyframe_depth[np.newaxis],\n self.poses_placeholder: keyframe_pose[np.newaxis],\n self.intrinsics_placeholder: self.intrinsics}\n\n keyframe_point_cloud, keyframe_point_colors = \\\n self.sess.run(self.outputs['point_cloud'], feed_dict=feed_dict)\n\n pointcloud = (keyframe_point_cloud, keyframe_point_colors)\n\n # only add the point cloud once in every 5 frames\n if self.vis_counter % 4 == 0:\n self.queue.put((pointcloud, keyframe_pose))\n \n else:\n self.queue.put((None, keyframe_pose))", "def plot(self, subsample=None, valid_instance_types=None):\n pt = self.sample_indices(subsample, valid_instance_types)\n\n x, y, z = self.pc[pt, 0], self.pc[pt, 1], self.pc[pt, 2]\n color = self.color[pt]\n\n return plot_pointcloud(x, y, z, color=color)", "def prep_pointcloud(input_dict,\n root_path,\n voxel_generator,\n target_assigner,\n db_sampler=None,\n max_voxels=20000,\n max_sweeps=10,\n 
remove_outside_points=False,\n training=True,\n create_targets=True,\n shuffle_points=False,\n remove_unknown=False,\n gt_rotation_noise=(-np.pi / 3, np.pi / 3),\n gt_loc_noise_std=(1.0, 1.0, 1.0),\n global_rotation_noise=(-np.pi / 4, np.pi / 4),\n global_scaling_noise=(0.95, 1.05),\n global_random_rot_range=(0.78, 2.35),\n global_translate_noise_std=(0, 0, 0),\n num_point_features=4,\n anchor_area_threshold=1,\n gt_points_drop=0.0,\n gt_drop_max_keep=10,\n remove_points_after_sample=True,\n anchor_cache=None,\n remove_environment=False,\n random_crop=False,\n reference_detections=None,\n out_size_factor=2,\n use_group_id=False,\n multi_gpu=False,\n min_points_in_gt=-1,\n random_flip_x=True,\n random_flip_y=True,\n sample_importance=1.0,\n out_dtype=np.float32):\n t = time.time()\n class_names = target_assigner.classes\n points = input_dict[\"lidar\"][\"points\"]\n indices = input_dict[\"lidar\"][\"indices\"]\n origins = input_dict[\"lidar\"][\"origins\"]\n if training:\n anno_dict = input_dict[\"lidar\"][\"annotations\"]\n gt_dict = {\n \"gt_boxes\": anno_dict[\"boxes\"],\n \"gt_names\": anno_dict[\"names\"],\n \"gt_importance\": np.ones([anno_dict[\"boxes\"].shape[0]], dtype=anno_dict[\"boxes\"].dtype),\n }\n if \"difficulty\" not in anno_dict:\n difficulty = np.zeros([anno_dict[\"boxes\"].shape[0]],\n dtype=np.int32)\n gt_dict[\"difficulty\"] = difficulty\n else:\n gt_dict[\"difficulty\"] = anno_dict[\"difficulty\"]\n if use_group_id and \"group_ids\" in anno_dict:\n group_ids = anno_dict[\"group_ids\"]\n gt_dict[\"group_ids\"] = group_ids\n calib = None\n if \"calib\" in input_dict:\n calib = input_dict[\"calib\"]\n\n # # Disable these two since we do not do this for NuScenes\n # if reference_detections is not None:\n # assert calib is not None and \"image\" in input_dict\n # C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)\n # frustums = box_np_ops.get_frustum_v2(reference_detections, C)\n # frustums -= T\n # frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)\n # frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)\n # surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)\n # masks = points_in_convex_polygon_3d_jit(points, surfaces)\n # points = points[masks.any(-1)]\n # if remove_outside_points:\n # assert calib is not None\n # image_shape = input_dict[\"image\"][\"image_shape\"]\n # points = box_np_ops.remove_outside_points(\n # points, calib[\"rect\"], calib[\"Trv2c\"], calib[\"P2\"], image_shape)\n\n # # Very interesting attempt\n # # I have tried the same and found it doesn't really work\n # if remove_environment is True and training:\n # selected = kitti.keep_arrays_by_name(gt_names, target_assigner.classes)\n # _dict_select(gt_dict, selected)\n # masks = box_np_ops.points_in_rbbox(points, gt_dict[\"gt_boxes\"])\n # points = points[masks.any(-1)]\n\n metrics = {}\n\n point_indices_to_remove = None\n if training:\n \"\"\"\n boxes_lidar = gt_dict[\"gt_boxes\"]\n bev_map = simplevis.nuscene_vis(points, boxes_lidar)\n cv2.imshow('pre-noise', bev_map)\n \"\"\"\n selected = kitti.drop_arrays_by_name(gt_dict[\"gt_names\"], [\"Denture\"])\n _dict_select(gt_dict, selected)\n if remove_unknown:\n remove_mask = gt_dict[\"difficulty\"] == -1\n \"\"\"\n gt_boxes_remove = gt_boxes[remove_mask]\n gt_boxes_remove[:, 3:6] += 0.25\n points = prep.remove_points_in_boxes(points, gt_boxes_remove)\n \"\"\"\n keep_mask = np.logical_not(remove_mask)\n _dict_select(gt_dict, keep_mask)\n gt_dict.pop(\"difficulty\")\n\n # This part is interesting - we will need to do the 
same\n if min_points_in_gt > 0:\n # points_count_rbbox takes 10ms with 10 sweeps nuscenes data\n point_counts = box_np_ops.points_count_rbbox(points, gt_dict[\"gt_boxes\"])\n mask = point_counts >= min_points_in_gt\n _dict_select(gt_dict, mask)\n\n gt_boxes_mask = np.array(\n [n in class_names for n in gt_dict[\"gt_names\"]], dtype=np.bool_)\n\n if db_sampler is not None:\n group_ids = None\n if \"group_ids\" in gt_dict:\n group_ids = gt_dict[\"group_ids\"]\n\n sampled_dict = db_sampler.sample_all(\n root_path,\n gt_dict[\"gt_boxes\"],\n gt_dict[\"gt_names\"],\n num_point_features,\n random_crop,\n gt_group_ids=group_ids,\n calib=calib)\n\n if sampled_dict is not None:\n sampled_gt_names = sampled_dict[\"gt_names\"]\n sampled_gt_boxes = sampled_dict[\"gt_boxes\"]\n sampled_points = sampled_dict[\"points\"]\n sampled_gt_masks = sampled_dict[\"gt_masks\"]\n gt_dict[\"gt_names\"] = np.concatenate(\n [gt_dict[\"gt_names\"], sampled_gt_names], axis=0)\n gt_dict[\"gt_boxes\"] = np.concatenate(\n [gt_dict[\"gt_boxes\"], sampled_gt_boxes])\n gt_boxes_mask = np.concatenate(\n [gt_boxes_mask, sampled_gt_masks], axis=0)\n sampled_gt_importance = np.full(\n [sampled_gt_boxes.shape[0]], sample_importance,\n dtype=sampled_gt_boxes.dtype)\n gt_dict[\"gt_importance\"] = np.concatenate(\n [gt_dict[\"gt_importance\"], sampled_gt_importance])\n\n if group_ids is not None:\n sampled_group_ids = sampled_dict[\"group_ids\"]\n gt_dict[\"group_ids\"] = np.concatenate(\n [gt_dict[\"group_ids\"], sampled_group_ids])\n\n # # Commented out because we have a new way of removing points\n # if remove_points_after_sample:\n # masks = box_np_ops.points_in_rbbox(points, sampled_gt_boxes)\n # point_indices_to_remove = np.flatnonzero(masks.any(-1))\n # # # Delay this process so we can use the full point cloud\n # # # when we do the ray stopping algorithm\n # # points = points[np.logical_not(masks.any(-1))]\n\n # # Paste objects behind so that we don't have to update indices\n # points = np.concatenate([sampled_points, points], axis=0)\n points = np.concatenate([points, sampled_points], axis=0)\n\n pc_range = voxel_generator.point_cloud_range\n group_ids = None\n if \"group_ids\" in gt_dict:\n group_ids = gt_dict[\"group_ids\"]\n\n # # Disable this one for now (not used in PointPillars anyways)\n # prep.noise_per_object_v3_(\n # gt_dict[\"gt_boxes\"],\n # points,\n # gt_boxes_mask,\n # rotation_perturb=gt_rotation_noise,\n # center_noise_std=gt_loc_noise_std,\n # global_random_rot_range=global_random_rot_range,\n # group_ids=group_ids,\n # num_try=100)\n\n # should remove unrelated objects after noise per object\n # for k, v in gt_dict.items():\n # print(k, v.shape)\n _dict_select(gt_dict, gt_boxes_mask)\n gt_classes = np.array(\n [class_names.index(n) + 1 for n in gt_dict[\"gt_names\"]],\n dtype=np.int32)\n gt_dict[\"gt_classes\"] = gt_classes\n gt_dict[\"gt_boxes\"], points, origins = prep.random_flip(\n gt_dict[\"gt_boxes\"], points, origins, 0.5, random_flip_x, random_flip_y)\n gt_dict[\"gt_boxes\"], points, origins = prep.global_rotation_v2(\n gt_dict[\"gt_boxes\"], points, origins, *global_rotation_noise)\n gt_dict[\"gt_boxes\"], points, origins = prep.global_scaling_v2(\n gt_dict[\"gt_boxes\"], points, origins, *global_scaling_noise)\n prep.global_translate_(\n gt_dict[\"gt_boxes\"], points, origins, global_translate_noise_std)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n mask = prep.filter_gt_box_outside_range_by_center(gt_dict[\"gt_boxes\"], bv_range)\n _dict_select(gt_dict, mask)\n\n # limit rad to 
[-pi, pi]\n gt_dict[\"gt_boxes\"][:, 6] = box_np_ops.limit_period(\n gt_dict[\"gt_boxes\"][:, 6], offset=0.5, period=2 * np.pi)\n\n # boxes_lidar = gt_dict[\"gt_boxes\"]\n # bev_map = simplevis.nuscene_vis(points, boxes_lidar)\n # cv2.imshow('post-noise', bev_map)\n # cv2.waitKey(0)\n\n # # Disable this for now (not used in PointPillars anyways)\n # if shuffle_points:\n # # shuffle is a little slow.\n # np.random.shuffle(points)\n\n # [0, -40, -3, 70.4, 40, 1]\n voxel_size = voxel_generator.voxel_size\n pc_range = voxel_generator.point_cloud_range\n grid_size = voxel_generator.grid_size\n\n # organize points into lists based on timestamps\n time_stamps = points[indices[:-1], -1] # counting on the fact we do not miss points from any intermediate time_stamps\n time_stamps = (time_stamps[:-1]+time_stamps[1:])/2\n time_stamps = [-1000.0] + time_stamps.tolist() + [1000.0] # add boundaries\n time_stamps = np.array(time_stamps)\n\n # # LL_OCCUPIED, LL_FREE = 0.85, -0.4\n # lo_occupied = np.log(0.7 / (1 - 0.7))\n # lo_free = np.log(0.4 / (1 - 0.4))\n\n # is there are additional points (from database sampling)\n num_original = indices[-1]\n if len(points) > num_original:\n # split data into two half (indexed and un-indexed)\n original_points, sampled_points = points[:num_original], points[num_original:]\n # compute occupancy and masks\n # visibility, original_mask, sampled_mask = mapping.compute_visibility_and_masks(\n # original_points, sampled_points, origins, time_stamps, pc_range, min(voxel_size)\n # )\n logodds, original_mask, sampled_mask = mapping.compute_logodds_and_masks(\n original_points, sampled_points, origins, time_stamps, pc_range, min(voxel_size) # , lo_occupied, lo_free\n )\n # apply visible mask\n points = np.concatenate((original_points[original_mask], sampled_points[sampled_mask]))\n else:\n # visibility = mapping.compute_visibility(\n # points, origins, time_stamps, pc_range, min(voxel_size)\n # )\n logodds = mapping.compute_logodds(\n points, origins, time_stamps, pc_range, min(voxel_size) #, lo_occupied, lo_free\n )\n\n # T = len(time_stamps)-1\n # visibility = visibility.reshape(T, -1)\n # if T < (1 + max_sweeps):\n # visibility = np.pad(visibility, ((0, (1+max_sweeps)-T), (0,0)), 'edge')\n\n # with open(f'./utils/mapping/examples/{time.time()}.pkl', 'wb') as f:\n # ##\n # pickle.dump(original_points, f)\n # pickle.dump(sampled_points, f)\n # pickle.dump(origins, f)\n # pickle.dump(time_stamps, f)\n # pickle.dump(pc_range, f)\n # pickle.dump(voxel_size, f)\n # ##\n # pickle.dump(occupancy, f)\n # pickle.dump(original_mask, f)\n # pickle.dump(sampled_mask, f)\n\n if training:\n if min_points_in_gt > 0:\n # points_count_rbbox takes 10ms with 10 sweeps nuscenes data\n point_counts = box_np_ops.points_count_rbbox(points, gt_dict[\"gt_boxes\"])\n mask = point_counts >= min_points_in_gt\n _dict_select(gt_dict, mask)\n\n # [352, 400]\n t1 = time.time()\n if not multi_gpu:\n res = voxel_generator.generate(\n points, max_voxels)\n voxels = res[\"voxels\"]\n coordinates = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = np.array([voxels.shape[0]], dtype=np.int64)\n else:\n res = voxel_generator.generate_multi_gpu(\n points, max_voxels)\n voxels = res[\"voxels\"]\n coordinates = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = np.array([res[\"voxel_num\"]], dtype=np.int64)\n metrics[\"voxel_gene_time\"] = time.time() - t1\n example = {\n 'voxels': voxels,\n # 'visibility': visibility,\n 'logodds': logodds, \n 'num_points': 
num_points,\n 'coordinates': coordinates,\n \"num_voxels\": num_voxels,\n \"metrics\": metrics,\n }\n if calib is not None:\n example[\"calib\"] = calib\n feature_map_size = grid_size[:2] // out_size_factor\n feature_map_size = [*feature_map_size, 1][::-1]\n # print(f'feature_map_size in prep_pointcloud(): {feature_map_size}')\n if anchor_cache is not None:\n # print('having anchor cache')\n anchors = anchor_cache[\"anchors\"]\n anchors_bv = anchor_cache[\"anchors_bv\"]\n anchors_dict = anchor_cache[\"anchors_dict\"]\n matched_thresholds = anchor_cache[\"matched_thresholds\"]\n unmatched_thresholds = anchor_cache[\"unmatched_thresholds\"]\n\n else:\n # print('NOT having anchor cache')\n ret = target_assigner.generate_anchors(feature_map_size)\n anchors = ret[\"anchors\"]\n anchors = anchors.reshape([-1, target_assigner.box_ndim])\n anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)\n anchors_bv = box_np_ops.rbbox2d_to_near_bbox(\n anchors[:, [0, 1, 3, 4, 6]])\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n # print(f'anchors.shape: {anchors.shape}')\n\n example[\"anchors\"] = anchors\n anchors_mask = None\n if anchor_area_threshold >= 0:\n # slow with high resolution. recommend disable this forever.\n coors = coordinates\n dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(\n coors, tuple(grid_size[::-1][1:]))\n dense_voxel_map = dense_voxel_map.cumsum(0)\n dense_voxel_map = dense_voxel_map.cumsum(1)\n anchors_area = box_np_ops.fused_get_anchors_area(\n dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)\n anchors_mask = anchors_area > anchor_area_threshold\n # example['anchors_mask'] = anchors_mask.astype(np.uint8)\n example['anchors_mask'] = anchors_mask\n # print(\"prep time\", time.time() - t)\n metrics[\"prep_time\"] = time.time() - t\n if not training:\n return example\n example[\"gt_names\"] = gt_dict[\"gt_names\"]\n # voxel_labels = box_np_ops.assign_label_to_voxel(gt_boxes, coordinates,\n # voxel_size, coors_range)\n if create_targets:\n t1 = time.time()\n targets_dict = target_assigner.assign(\n anchors,\n anchors_dict,\n gt_dict[\"gt_boxes\"],\n anchors_mask,\n gt_classes=gt_dict[\"gt_classes\"],\n gt_names=gt_dict[\"gt_names\"],\n matched_thresholds=matched_thresholds,\n unmatched_thresholds=unmatched_thresholds,\n importance=gt_dict[\"gt_importance\"])\n\n \"\"\"\n boxes_lidar = gt_dict[\"gt_boxes\"]\n bev_map = simplevis.nuscene_vis(points, boxes_lidar, gt_dict[\"gt_names\"])\n assigned_anchors = anchors[targets_dict['labels'] > 0]\n ignored_anchors = anchors[targets_dict['labels'] == -1]\n bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], ignored_anchors, [128, 128, 128], 2)\n bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], assigned_anchors, [255, 0, 0])\n cv2.imshow('anchors', bev_map)\n cv2.waitKey(0)\n\n boxes_lidar = gt_dict[\"gt_boxes\"]\n pp_map = np.zeros(grid_size[:2], dtype=np.float32)\n voxels_max = np.max(voxels[:, :, 2], axis=1, keepdims=False)\n voxels_min = np.min(voxels[:, :, 2], axis=1, keepdims=False)\n voxels_height = voxels_max - voxels_min\n voxels_height = np.minimum(voxels_height, 4)\n # sns.distplot(voxels_height)\n # plt.show()\n pp_map[coordinates[:, 1], coordinates[:, 2]] = voxels_height / 4\n pp_map = (pp_map * 255).astype(np.uint8)\n pp_map = cv2.cvtColor(pp_map, cv2.COLOR_GRAY2RGB)\n pp_map = simplevis.draw_box_in_bev(pp_map, [-50, -50, 3, 50, 50, 1], boxes_lidar, [128, 0, 128], 1)\n cv2.imshow('heights', 
pp_map)\n cv2.waitKey(0)\n \"\"\"\n example.update({\n 'labels': targets_dict['labels'],\n 'reg_targets': targets_dict['bbox_targets'],\n # 'reg_weights': targets_dict['bbox_outside_weights'],\n 'importance': targets_dict['importance'],\n })\n return example", "def addDataPoints(self):\n pass", "def show(data_set, number_points: int):\n print(f'info: Showing {number_points} as maximum.')\n sub_set_points = np.random.choice(range(data_set.shape[0]), size=min(data_set.shape[0], number_points))\n x = data_set[sub_set_points, 0]\n y = data_set[sub_set_points, 1]\n z = data_set[sub_set_points, 2]\n\n fig = plt.figure(figsize=(8, 8))\n ax = mplot3d.Axes3D(fig)\n ax.set_title('NMSLIB index 3D representation', fontsize=20)\n ax.scatter(xs=x, ys=y, zs=z)\n plt.show()", "def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')", "def generate_image(self,true_dist):\n N_POINTS = 128\n RANGE = 3\n\n points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')\n points[:, :, 0] = np.linspace(-RANGE, RANGE, N_POINTS)[:, None]\n points[:, :, 1] = np.linspace(-RANGE, RANGE, N_POINTS)[None, :]\n points = points.reshape((-1, 2))\n\n plt.clf()\n\n #true_dist = true_dist.cpu().data.numpy()\n samples = self.G(self.sample_z_)\n print('generate size is',samples.size())\n samples = samples.cpu().data.numpy()\n\n x = y = np.linspace(-RANGE, RANGE, N_POINTS)\n # plt.contour(x, y, disc_map.reshape((len(x), len(y))).transpose())\n\n plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+')\n plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+')\n plt.show()", "def show_pair_points(points, pairs, title, save_path = None):\n\n fig = plt.figure(figsize = (6, 6))\n plt.title(title)\n plt.scatter(points[:, 0], points[:, 1], c = 'b', s = 5, label = \"point cloud\")\n for pair in pairs:\n print(pair)\n point_1 = points[pair[0]]\n point_2 = points[pair[1]]\n plt.scatter([point_1[0], point_2[0]], [point_1[1], point_2[1]], s = 10)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.axis('equal')\n plt.grid()\n plt.legend()\n plt.show()\n \n if type(save_path) != type(None):\n fig.savefig(save_path)", "def test_plot_cspad(geometry, fname_data, amp_range=(0,0.5)):\n #rad1 = 93\n #rad2 = 146\n rad1 = 655\n rad2 = 670\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 500, 500# None\n\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=None)\n rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n\n ixo, iyo = geometry.point_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n logger.info('Detector origin indexes ixo:%d iyo:%d' % (ixo, iyo))\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n arr.shape= (4,8,185,388)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n\n arr.shape = rows.shape\n img = img_from_pixel_arrays(rows, cols, W=arr)\n\n rcc_ring = 
(iyo, ixo)\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.drawCircle(axim, rcc_ring, rad1, linewidth=1, color='w', fill=False)\n gg.drawCircle(axim, rcc_ring, rad2, linewidth=1, color='w', fill=False)\n gg.drawCenter(axim, rcc_ring, rad1, linewidth=1, color='w')\n gg.move(500,10)\n gg.show()", "def visualize_waypoints(current_robot_pose, current_camera_pose,\n marker_id, last_robot_pose=None):\n\n waypoints = rospy.get_param(\"/scene_exploration_sm/waypoints\")\n publisher = rospy.Publisher(waypoints, Marker, queue_size=10)\n points = []\n rospy.logdebug(\"last_robot_pose is: \" + str(last_robot_pose))\n if last_robot_pose is not None:\n points.append(Point(last_robot_pose.position.x,\n last_robot_pose.position.y,\n 0))\n points.append(Point(current_robot_pose.position.x,\n current_robot_pose.position.y,\n 0))\n\n if current_camera_pose is not None:\n z_pose = current_camera_pose.position.z\n else:\n z_pose = 1.35\n\n center = Point(current_robot_pose.position.x,\n current_robot_pose.position.y,\n z_pose / 2.0)\n pose_marker = Marker()\n pose_marker.header.stamp = rospy.Time.now()\n pose_marker.header.frame_id = '/map'\n pose_marker.ns = 'waypoints_cyl'\n pose_marker.type = Marker.CYLINDER\n pose_marker.id = marker_id + 2\n pose_marker.action = Marker.ADD\n pose_marker.scale = Vector3(0.05, 0.05, z_pose)\n pose_marker.color = ColorRGBA(0, 0, 1, 1)\n pose_marker.lifetime = rospy.Duration()\n pose_marker.pose.position = center\n\n # Only on the first try we have to wait for the publisher,\n # next times we know the last pose and this won't be executed\n if last_robot_pose is None:\n rospy.loginfo(\"Sleeping till waypoint publisher is ready\")\n rospy.sleep(1)\n\n publisher.publish(pose_marker)\n\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n marker.header.frame_id = '/map'\n marker.ns = 'waypoints_lines'\n marker.type = Marker.LINE_LIST\n marker.id = marker_id\n marker.action = Marker.ADD\n marker.scale = Vector3(0.02, 0.1, 0.1)\n marker.color = ColorRGBA(0, 1, 1, 1)\n marker.lifetime = rospy.Duration()\n marker.points = points\n\n publisher.publish(marker)\n\n if current_camera_pose is not None:\n arrow = Marker()\n arrow.header.stamp = rospy.Time.now()\n arrow.header.frame_id = '/map'\n arrow.ns = 'waypoints_arrows'\n arrow.pose = current_camera_pose\n arrow.type = Marker.ARROW\n arrow.id = marker_id + 1\n arrow.action = Marker.ADD\n arrow.scale = Vector3(0.5, 0.02, 0.02)\n arrow.color = ColorRGBA(1, 1, 0, 1)\n arrow.lifetime = rospy.Duration()\n\n publisher.publish(arrow)", "def visualize(vis, features, label):\n if vis == 'PCA':\n #n_components = st.sidebar.slider(\"n_components\", 2, 10)\n #alpha = st.sidebar.slider(\"alpha\", 0.8, 2.0)\n #pca = PCA(n_components)\n pca = PCA(2)\n\n X_projected = pca.fit_transform(features)\n \n x1 = X_projected[:, 0]\n x2 = X_projected[:, 1]\n\n\n fig = plt.figure()\n plt.scatter(x1, x2, c=label, alpha=0.8, cmap='viridis')\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\")\n plt.colorbar()\n\n st.pyplot()", "def test_visualize():\n # Instantiate three particles for testing\n particles = [Particle(0.3, 0.5, 1), \n Particle(0.0, -0.5, -1), \n Particle(-0.1, -0.4, 3)]\n simulator = ParticleSimulator(particles)\n visualize(simulator)", "def visualize(self, reduced_data):\n\t\t# Step size of the mesh. Decrease to increase the quality of the VQ.\n\t\th = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].\n\t\t\n\t\t# Plot the decision boundary. 
For that, we will assign a color to each\n\t\tx_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1\n\t\ty_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1\n\t\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n\t\t# Obtain labels for each point in mesh. Use last trained model.\n\t\tZ = self.estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n\t\t# Put the result into a color plot\n\t\tZ = Z.reshape(xx.shape)\n\t\t\n\t\tplt.figure(1)\n\t\tplt.clf()\n\t\tplt.imshow(Z, interpolation='nearest',\n\t\t extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n\t\t cmap=plt.cm.Paired,\n\t\t aspect='auto', origin='lower')\n\n\t\tplt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=4)\n\t\t# Plot the centroids as a white X\n\t\tcentroids = self.estimator.cluster_centers_\n\t\tplt.scatter(centroids[:, 0], centroids[:, 1],\n\t\t marker='x', s=169, linewidths=3,\n\t\t color='w', zorder=10)\n\t\tplt.title('K-means clustering with random data (PCA-reduced data)\\n'\n\t\t 'Centroids are marked with white cross')\n\t\tplt.xlim(x_min, x_max)\n\t\tplt.ylim(y_min, y_max)\n\t\tplt.xticks(())\n\t\tplt.yticks(())\n\t\tplt.show()", "def generate_json_pointcloud(rgb_file, depth_file, json_file):\n rgb = Image.open(rgb_file)\n depth = Image.open(depth_file)\n rgb = rgb.transpose(Image.FLIP_TOP_BOTTOM)\n depth = depth.transpose(Image.FLIP_TOP_BOTTOM)\n print(depth.mode)\n print(rgb.mode)\n if rgb.size != depth.size:\n raise Exception(\"Color and depth image do not have the same resolution.\")\n if rgb.mode != \"RGB\":\n raise Exception(\"Color image is not in RGB format\")\n if depth.mode != \"L\":\n raise Exception(\"Depth image is not in intensity format\")\n\n points = []\n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u, v))\n Z = depth.getpixel((u, v)) *.22\n if Z == 0: continue\n Y = .22 * v\n X = .22 * u\n points.append(str(X) + ' ' + str(Y) + ' ' + str(Z))\n points.append(str(color[0]) + ' ' + str(color[1]) + ' ' + str(color[2]))\n print('length is:', len(points))\n with open(json_file, 'w') as outfile:\n json.dump(points, outfile)\n outfile.close()", "def proc_and_pub_pointcloud(self):\n image = self.sonar_result[\"image_array\"]\n\n # Compute range and bearing maps using the ping result\n ping_result = self.sonar_result[\"ping_result\"].data\n resolution = ping_result[8]\n [rows, cols] = np.shape(image)\n range_max = rows*resolution\n ranges = np.linspace(0,range_max, rows)\n\n # Check image frequency\n high_freq = (ping_result[2] > 1000000)\n if high_freq:\n # bearings = np.tile(self.high_freq_brgs, (rows, 1))\n bearing_mesh, range_mesh = np.meshgrid(self.high_freq_brgs, ranges)\n else:\n # bearings = np.tile(self.low_freq_brgs, (rows, 1))\n bearing_mesh, range_mesh = np.meshgrid(self.high_freq_brgs, ranges)\n\n # TODO: Turn this into a gate (like the MBES)\n # Threshold image\n ret, image = cv2.threshold(image, self.image_threshold, 255, cv2.THRESH_TOZERO)\n # TODO: Maybe can tune parameters better, good results with current values.\n # Detect edges with second Laplacian and processes the shit out of the image\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(5,5))\n image = clahe.apply(image)\n image = cv2.Canny(image, 200, 255, L2gradient=True)\n kernel = np.ones((5,5), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)\n edges = np.argmax(image, axis=0)\n cols = np.arange(0,len(edges),1)\n image_out = np.zeros(image.shape, np.uint8)\n image_out[edges,cols] = 
255\n\n # TODO: send the closest point's distance via UDP to Dune\n min_distance = ranges[np.min(edges)]\n self.udp_socket.sendto(min_distance, (\"\", 7777))\n\n if self.publish_edges:\n # Publish processed edges as an image\n image_msg = self.bridge.cv2_to_imgmsg(image_out, encoding=\"passthrough\")\n self.image_pub.publish(image_msg)\n\n if self.publish_pointcloud:\n # Publish processed edges as a 2D pointcloud\n pointcloud_msg = self._build_pcl2_msg(image_out)\n self.point_pub.publish(pointcloud_msg)", "def __init__(self, src, dst, plot=True, reinit=False):\n self.src = src\n self.dst = dst\n assert self.src.num == self.dst.num\n self.result = PointCloud(src)\n self.plot = plot\n self.reinit = reinit\n if self.plot: self.plotter = PointCloudPlotter(self.dst)", "def PointCloudfromStructOutput(self,file):\n print(\"Creating Structure Point Cloud\")\n xyz = self.readStructOutput(file)\n pc = np.zeros((int(len(xyz)/2.0),3))\n pc[:,0] = xyz[::2,0]*1000\n pc[:,1] = xyz[::2,1]*1000\n pc[:,2] = xyz[::2,2]*1000\n head = \"\"\"X,Y,Z\"\"\"\n np.savetxt(file, pc, delimiter=',',fmt='%.10f', header=head)\n return", "def plot(self):\n pass", "def create_annotation_data(self):\n for i, hp in enumerate(self.hanging_point_in_camera_coords_list):\n px, py = self.camera_model.project3d_to_pixel(hp.worldpos())\n if self.save_debug_image:\n self.bgr_axis = self.bgr.copy()\n if 0 <= px < self.target_width and 0 <= py < self.target_height:\n if self.save_debug_image:\n draw_axis(self.bgr_axis,\n hp.worldrot(),\n hp.worldpos(),\n self.camera_model.K)\n create_gradient_circle(\n self.annotation_img,\n int(py), int(px))\n if self.visible_labels == []:\n self.annotation_data.append(\n {'xy': [int(px), int(py)],\n 'depth': hp.worldpos()[2] * 1000,\n 'quaternion': hp.quaternion.tolist()}\n )\n else:\n self.annotation_data.append(\n {'xy': [int(px), int(py)],\n 'depth': hp.worldpos()[2] * 1000,\n 'quaternion': hp.quaternion.tolist(),\n 'label': self.visible_labels[i]}\n )\n self.rotation_map.add_quaternion(\n int(px), int(py), hp.quaternion)\n\n # self.depth_map.add_depth(\n # int(px), int(py),\n # hp.worldpos()[2] * 1000)\n\n if np.all(self.annotation_img == 0):\n print('out of camera')\n return False\n\n self.annotation_img \\\n = self.annotation_img / self.annotation_img.max() * 255\n self.annotation_img = self.annotation_img.astype(np.uint8)\n\n self.rotations = self.rotation_map.rotations\n\n # self.hanging_points_depth = self.depth_map.on_depth_image(self.depth)\n\n return True", "def main():\n # Placing imports here so it will be imported only if user want to test algorithm, not when importing\n # Class DepthCameraServer\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import sensors_classes as sensors\n from images_processing_class import ImagesProcessing\n import struct\n import time\n\n # Starting Thread which receives data from VideoCamera, port od thread's socket must be the same as the port at\n # which data from VideoCamera is redirected, to be sure check where VideoCamera data stream is send in script env.py\n depth_camera_server = DepthCameraServer('localhost', 60012)\n depth_camera_server.run()\n\n pose_server = sensors.Pose_server('localhost', 60007)\n pose_server.run()\n\n # Waiting 1 sec to be sure than depth_camera_server has received minimum 1 image, because program will crash if\n # depth_camera_server doesn't have time to receive an image\n time.sleep(1)\n\n points = depth_camera_server.get_points()\n\n lista_punktow = []\n x = []\n y = []\n z = []\n\n 
data_pose_dict = pose_server.get_all()\n pose_x = data_pose_dict['x']\n pose_y = data_pose_dict['y']\n pose_z = data_pose_dict['z']\n\n yawp = data_pose_dict['yaw']\n pitchp = data_pose_dict['pitch']\n rollp = data_pose_dict['roll']\n\n # Each 3D point is a set of float(x,y,z). Each point has a size of 12 bytes because\n # 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then\n # converting this data to tuple with 3 float (xyz).\n\n #\n # Processing cloud of points to seperate x, y and z was copied from dcam_old.py\n #\n\n for i in range(0, len(points) - 12, 12):\n xyz = struct.unpack('fff', points[i:i + 12])\n\n # rotation is included\n x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)\n\n # data from pose is included\n xp = round(x1p + pose_x, 1)\n yp = round(y1p + pose_y, 1)\n zp = round(z1p + pose_z, 1)\n temp = [xp, yp, zp]\n lista_punktow.append(temp)\n\n # Choosing only these points which have minimum 0.45 meters at z-axis, but why???\n for i in lista_punktow:\n x.append(i[0])\n y.append(i[1])\n z.append(i[2])\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)\n ax.scatter(x[0], y[0], z[0], c='red')\n ax.scatter(x[1], y[1], z[1], c='yellow')\n ax.scatter(x[2], y[2], z[2], c='black')\n ax.scatter(pose_x, pose_y, pose_z, c='green')\n plt.show()", "def visualize_in_2d(self):\n ae = Autoencoder_test(self.data)\n self.code = ae.encode(n_dimension=2, learning_rate=0.01, training_epochs=10, batch_size=400)\n for i in range(len(self.cluster)):\n list_x = []\n list_y = []\n for j in self.cluster[i]:\n list_x.append(self.code[0][j,0])\n list_y.append(self.code[0][j,1])\n plt.scatter(list_x,list_y)\n plt.show()\n return", "def render_radar_point_cloud(self, point_cloud, extrinsics=Pose(), color=RED, velocity=None, velocity_scale=10):\n combined_transform = self._bev_rotation * extrinsics\n\n pointcloud_in_bev = combined_transform * point_cloud\n point_cloud2d = pointcloud_in_bev[:, :2]\n\n point_cloud2d[:, 0] = (self._center_pixel[0] + point_cloud2d[:, 0] * self._pixels_per_meter)\n point_cloud2d[:, 1] = (self._center_pixel[1] + point_cloud2d[:, 1] * self._pixels_per_meter)\n\n H, W = self.data.shape[:2]\n uv = point_cloud2d.astype(np.int32)\n in_view = np.logical_and.reduce([\n (point_cloud2d >= 0).all(axis=1),\n point_cloud2d[:, 0] < W,\n point_cloud2d[:, 1] < H,\n ])\n uv = uv[in_view]\n\n for row in uv:\n cx, cy = row\n cv2.circle(self.data, (cx, cy), 7, RED, thickness=1)\n\n def clip_norm(v, x):\n M = np.linalg.norm(v)\n if M == 0:\n return v\n\n return np.clip(M, 0, x) * v / M\n\n if velocity is not None:\n tail = point_cloud + velocity_scale * velocity\n pointcloud_in_bev_tail = combined_transform * tail\n point_cloud2d_tail = pointcloud_in_bev_tail[:, :2]\n point_cloud2d_tail[:, 0] = (self._center_pixel[0] + point_cloud2d_tail[:, 0] * self._pixels_per_meter)\n point_cloud2d_tail[:, 1] = (self._center_pixel[1] + point_cloud2d_tail[:, 1] * self._pixels_per_meter)\n uv_tail = point_cloud2d_tail.astype(np.int32)\n uv_tail = uv_tail[in_view]\n for row, row_tail in zip(uv, uv_tail):\n v_2d = row_tail - row\n v_2d = clip_norm(v_2d, .025 * W)\n\n cx, cy = row\n cx2, cy2 = row + v_2d.astype(np.int)\n\n cx2 = np.clip(cx2, 0, W - 1)\n cy2 = np.clip(cy2, 0, H - 1)\n color = GREEN\n # If moving away from vehicle change the color (not strictly correct because radar is not a (0,0))\n # TODO: calculate actual radar sensor position\n if np.dot(row - np.array([W / 2, H / 2]), v_2d) > 0:\n color = 
(255, 110, 199)\n cv2.arrowedLine(self.data, (cx, cy), (cx2, cy2), color, thickness=1, line_type=cv2.LINE_AA)", "def vector_plot(tvects,is_vect=True,orig=[0,0,0]):\n\n if is_vect:\n if not hasattr(orig[0],\"__iter__\"):\n coords = [[orig,np.sum([orig,v],axis=0)] for v in tvects]\n else:\n coords = [[o,np.sum([o,v],axis=0)] for o,v in zip(orig,tvects)]\n else:\n coords = tvects\n\n data = []\n for i,c in enumerate(coords):\n X1, Y1, Z1 = zip(c[0])\n X2, Y2, Z2 = zip(c[1])\n vector = go.Scatter3d(x = [X1[0],X2[0]],\n y = [Y1[0],Y2[0]],\n z = [Z1[0],Z2[0]],\n marker = dict(size = [0,5],\n color = ['blue'],\n line=dict(width=5,\n color='DarkSlateGrey')),\n name = 'Vector'+str(i+1))\n data.append(vector)\n\n layout = go.Layout(\n margin = dict(l = 4,\n r = 4,\n b = 4,\n t = 4)\n )\n fig = go.Figure(data=data,layout=layout)\n #pio.write_html(fig,file='index.html',auto_open=False)\n #py.plot(fig, filename = 'gdp_per_cap4', auto_open=True)\n fig.show()", "def testPlots(self):\n\t\tself.watcher.analyze(layers=[67], plot=True, randomize=True)", "def visualize_clouds(clouds, paths):\n\n for cloud, path in zip(clouds, paths):\n print(\"Visualizing scans generated from:\", path)\n for scan in cloud:\n draw_geometries([scan])", "def generate_wordcloud(dict_, title='WordCloud', PATH=None):\n wordcloud = WordCloud(min_font_size=10).generate_from_frequencies(dict_)\n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\") \n plt.title(title, size = 24)\n plt.tight_layout(pad = 0) \n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n plt.show()", "def stamps(d_data='',**kwargs):\n GR = glo.global_results()\n if p.gal_index == 'all':\n gal_indices = np.arange(GR.N_gal)\n else:\n gal_indices = p.gal_index\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n for gal_index in gal_indices:\n\n fig, ax = plt.subplots(figsize=(8,8))\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = gal_ob.particle_data.get_dataframe('simgas',d_data=d_data)\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop,pix_size_kpc=p.pix_size_kpc,scale=1.5)\n\n # Plot\n ax.set_facecolor(\"black\")\n Rmax = max_scale/2\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n Rmax = p.R_max\n ax.set_xlim([-Rmax,Rmax])\n ax.set_ylim([-Rmax,Rmax])\n ax.text(0.05,0.05,'G%i' % gal_index,\\\n fontsize=55,transform=ax.transAxes,color='white')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n\n #plt.gca().set_axis_off()\n #plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, \n # hspace = 0, wspace = 0)\n #plt.margins(0,0)\n #plt.gca().xaxis.set_major_locator(plt.NullLocator())\n #plt.gca().yaxis.set_major_locator(plt.NullLocator())\n if not os.path.isdir(p.d_plot + 'sim_data/stamps/'): os.mkdir(p.d_plot + 'sim_data/stamps/') \n plt.savefig(p.d_plot + 'sim_data/stamps/%s%s_G%i.png' % (p.sim_name,p.sim_run,gal_index),\\\n bbox_inches = 'tight', pad_inches = 0)", "def showAsPoints(self): \n MonkeyPatchMayaVi()\n #import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n @mlab.show\n def _showSimple():\n morphPts = [ SVVisitorFactory.Array4AllPoints(morph)() for morph in self.morphs ]\n pts = numpy.concatenate( morphPts)\n return mlab.points3d(pts[:, 
0], pts[:, 1], pts[:, 2], pts[:, 3], colormap=self.colormap, scale_factor=self.scale_factor)\n _showSimple()", "def PlotAirplane():\n airplane = vtkInterface.PolyData(planefile)\n airplane.Plot()", "def geocube():", "def draw3DPts(pcl_1, pcl_2=None, color_1=None, color_2=None):\n input_size_1 = list(pcl_1.size() )\n B = input_size_1[0]\n C = input_size_1[1]\n N1 = input_size_1[2]\n if pcl_2 is not None:\n input_size_2 = list(pcl_2.size() )\n N2 = input_size_2[2]\n\n pcl_1_cpu = pcl_1.cpu().numpy()\n if pcl_2 is not None:\n pcl_2_cpu = pcl_2.cpu().numpy()\n if color_1 is not None:\n color_1_cpu = color_1.cpu().numpy()\n else:\n color_1_cpu = None\n if color_2 is not None:\n color_2_cpu = color_2.cpu().numpy()\n else:\n color_2_cpu = None\n \n \n for i in range(B):\n # fig = plt.figure(i)\n # ax = fig.gca(projection='3d')\n # plt.cla()\n\n pcd_o3d_1 = np_batch_to_o3d_pcd(i, pcl_1_cpu, color_1_cpu)\n\n if pcl_2 is not None:\n pcd_o3d_2 = np_batch_to_o3d_pcd(i, pcl_2_cpu, color_2_cpu)\n draw_pcls(pcd_o3d_1, pcd_o3d_2, uniform_color=color_1 is None)\n else:\n draw_pcls(pcd_o3d_1, uniform_color=color_1 is None)\n\n # plt.axis('equal')\n # plt.show()\n # plt.gca().set_aspect('equal')\n # plt.gca().set_zlim(-10, 10)\n # plt.gca().set_zlim(0, 3.5)", "def dbg():\n ds = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)\n cs = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)\n\n # Get segmentation matrices\n mean_box, mean_bg = np.loadtxt('segmentation_data/mean_box.txt', dtype='float64'), np.loadtxt('segmentation_data/mean_bg.txt', dtype='float64')\n cv_box, cv_bg = np.loadtxt('segmentation_data/covariance_box.txt', dtype='float64'), np.loadtxt('segmentation_data/covariance_bg.txt', dtype='float64')\n segmentation_array = [mean_box, mean_bg, cv_box, cv_bg]\n # superimpose all from the out directory\n for (d, c) in load_all_msg():\n d0, c0 = get_render_points(d, c, segmentation_array)\n ds = np.concatenate((ds, d0))\n cs = np.concatenate((cs, c0))\n\n v = pptk.viewer(ds)\n v.set(point_size=0.0001, phi=0, r=1, theta=0)\n v.set(lookat=np.array([0.0, 0.0, 0.0], dtype=np.float32))\n np_colors_filtered = cs.astype(float)\n np_colors_filtered /= 255\n np_colors_filtered = np.c_[np_colors_filtered, np.ones(np_colors_filtered.shape[0])]\n v.attributes(np_colors_filtered)", "def drawPointCloud(points, ax, color=None):\n if len(points.shape) != 2 or points.shape[0] != 3:\n raise ValueError(\"'points' must be 3xN\")\n if color == None:\n color = __color_cycle.next()\n elif color in (0, 1, 2):\n color = points[color, :]\n ax.scatter(points[0,:].T, points[1,:].T, points[2,:].T, c=color)", "def resultPlots(record):\n record.createDataFrames()\n \n atmPlot(record)\n clientPlot(record)\n transactionPlot(record)", "def vis_difference(self):\n print(self.init_vec)\n\n init = self.init_output.numpy()\n\n alphas = np.linspace(0, 1, 20)\n for i, alpha in enumerate(alphas):\n\n display.clear_output(wait=True)\n norm = [torch.linalg.norm(torch.tensor(\n self.init_vec + alpha*self.eigen[i]), axis=1).detach().numpy() for i in range(2)]\n\n diff = np.array([self.compute_difference(\n alpha, self.eigen[i]) for i in range(2)])\n\n fig = plt.figure(figsize=(14, 12), tight_layout=True)\n fig.suptitle(\"Latent direction variation\", fontsize=20)\n gs = gridspec.GridSpec(2, 2)\n\n ax_temp = plt.subplot(gs[0, :])\n ax_temp.scatter(\n init[:, 0], init[:, 1])\n ax_temp.set_title(\"Initial Dataset\")\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n for j in range(2):\n ax_temp = 
plt.subplot(gs[1, j])\n sc = ax_temp.quiver(\n init[:, 0], init[:, 1], diff[j, :, 0], diff[j, :, 1], norm[j])\n sc.set_clim(np.min(norm[j]), np.max(norm[j]))\n plt.colorbar(sc)\n ax_temp.set_title(\n \"Direction: {}, alpha: {}\".format(j+1, alpha))\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n plt.savefig(\"frames_dir/fig_{}\".format(i))\n plt.show()", "def show_current_pair_by_3d_slice(iS,iT):\n import matplotlib.pyplot as plt\n import easyreg.viewers as viewers\n fig, ax = plt.subplots(2,3)\n plt.setp(plt.gcf(), 'facecolor', 'white')\n plt.style.use('bmh')\n\n ivsx = viewers.ImageViewer3D_Sliced(ax[0][0], iS, 0, 'source X', True)\n ivsy = viewers.ImageViewer3D_Sliced(ax[0][1], iS, 1, 'source Y', True)\n ivsz = viewers.ImageViewer3D_Sliced(ax[0][2], iS, 2, 'source Z', True)\n\n ivtx = viewers.ImageViewer3D_Sliced(ax[1][0], iT, 0, 'target X', True)\n ivty = viewers.ImageViewer3D_Sliced(ax[1][1], iT, 1, 'target Y', True)\n ivtz = viewers.ImageViewer3D_Sliced(ax[1][2], iT, 2, 'target Z', True)\n\n\n feh = viewers.FigureEventHandler(fig)\n feh.add_axes_event('button_press_event', ax[0][0], ivsx.on_mouse_press, ivsx.get_synchronize, ivsx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][1], ivsy.on_mouse_press, ivsy.get_synchronize, ivsy.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][2], ivsz.on_mouse_press, ivsz.get_synchronize, ivsz.set_synchronize)\n\n feh.add_axes_event('button_press_event', ax[1][0], ivtx.on_mouse_press, ivtx.get_synchronize, ivtx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][1], ivty.on_mouse_press, ivty.get_synchronize, ivty.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][2], ivtz.on_mouse_press, ivtz.get_synchronize, ivtz.set_synchronize)\n\n feh.synchronize([ax[0][0], ax[1][0]])\n feh.synchronize([ax[0][1], ax[1][1]])\n feh.synchronize([ax[0][2], ax[1][2]])", "def visualise_points_on_rd(rd_matrix, path, points, range_res, doppler_res):\n rd_img = SignalVisualizer(rd_matrix).get_image\n for point in points:\n range_coord = (point[0] / range_res).astype(int)\n doppler_coord = (point[1] / doppler_res).astype(int)\n if point[1] < 0:\n doppler_coord += int(rd_matrix.shape[1]/2 - 1)\n else:\n doppler_coord += int(rd_matrix.shape[1]/2)\n rd_img[range_coord*4:(range_coord*4+4),\n doppler_coord*4:(doppler_coord*4+4)] = [0., 0., 0.]\n plt.imsave(path, rd_img)\n plt.close()", "def vis_d():\n \n data_gen = generator(fixed_noise)\n# loss = d_loss(discriminator(data_gen), discriminator(grid))\n loss = g_loss(discriminator(grid))\n loss.backward()\n \n grads = - grid.grad.data.numpy()\n grid.grad.data *= 0 \n plt.quiver(X_grid, Y_grid, grads[:, 0], grads[:, 1], color='black',alpha=0.9)", "def showCrossPoints(self, surface):\n for point in self.cross_points:\n point.show(surface)", "def plotPoints(x,y):\n display = PacmanPlot(x,y)\n display.takeControl()", "def __init__(self, eulers_list):\n self.__dpi = 150\n self.__title = \"default\"\n self.__data = eulers_list\n self.__plane_list = [[0, 0, 1]]\n self.__is_literal = True # whether to use permutation to get a family of planes\n self.__lattice_vector = np.array([1.0, 1.0, 1.0]) # most simple case as default\n self.__output = \"pdf\"\n self.__clr_list = None\n self.__ref = np.eye(3) # matrix used to define xtal unit cell in reference configuration\n # set up pyplot\n self.__fig = plt.figure()\n self.__fig.add_subplot(111, aspect='equal')\n self.__fig.gca().add_artist(plt.Circle((0, 0), 1, 
color='k', fill=False))\n self.__unique_marker = False\n plt.plot([-1, 1], [0, 0], c=\"k\")\n plt.plot([0, 0], [-1, 1], c=\"k\")\n plt.gca().set_xlim((-1.15, 1.15))\n plt.gca().set_ylim((-1.15, 1.15))\n plt.gca().axes.get_xaxis().set_visible(False)\n plt.gca().axes.get_yaxis().set_visible(False)", "def show():\n setup()\n plt.show()", "def point_cloud(self, X, Y, Z, size=1, color='#FF3232', bordercolor='#FF3232', legend='', width=0.5, opacity=1.0):\n point_cloud = go.Scatter3d(\n x=X,\n y=Y,\n z=Z,\n # showlegend=False,\n name=legend,\n mode='markers',\n marker=dict(\n size=size,\n color=color,\n line=dict(\n color=bordercolor,\n width=width\n ),\n # opacity=opacity\n )\n )\n\n return point_cloud", "def get_pointcloud(dataset, NUM_POINT=2048, shuffle=True):\n if dataset == 'modelnet':\n train_file_idxs = np.arange(0, len(TRAIN_FILES_MODELNET))\n data_train = []\n label_train = []\n for fn in range(len(TRAIN_FILES_MODELNET)):\n print('----' + str(fn) + '-----')\n current_data, current_label = provider.loadDataFile(TRAIN_FILES_MODELNET[fn])\n current_data = current_data[:,0:NUM_POINT,:]\n current_label = np.squeeze(current_label)\n data_train.append(current_data)\n label_train.append(current_label)\n result_train = np.vstack(data_train)\n label_train = np.concatenate(label_train, axis=None)\n if shuffle:\n X_train, y_train, _ = provider.shuffle_data(result_train, np.squeeze(label_train)) \n else:\n X_train, y_train = result_train, np.squeeze(label_train)\n \n data_test = []\n label_test = []\n for fn in range(len(TEST_FILES_MODELNET)):\n print('----' + str(fn) + '-----')\n current_data, current_label = provider.loadDataFile(TEST_FILES_MODELNET[fn])\n current_data = current_data[:,0:NUM_POINT,:]\n current_label = np.squeeze(current_label)\n data_test.append(current_data)\n label_test.append(current_label)\n result_test = np.vstack(data_test)\n label_test = np.concatenate(label_test, axis=None)\n if shuffle:\n X_test, y_test, _ = provider.shuffle_data(result_test, np.squeeze(label_test))\n else:\n X_test, y_test = result_test, np.squeeze(label_test)\n elif dataset == 'shapenet':\n shapenet_data, shapenet_label = provider.get_shapenet_data()\n shapenet_data = shapenet_data[:,0:NUM_POINT,:]\n X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)\n elif dataset == 'shapenet_chair':\n shapenet_data, shapenet_label = provider.get_shapenet_data()\n shapenet_data = shapenet_data[:,0:NUM_POINT,:]\n shapenet_data, shapenet_label = shapenet_data[shapenet_label==17], shapenet_label[shapenet_label==17]\n X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)\n elif dataset == 'modelnet10':\n current_data, current_label = provider.loadDataFile(MODELNET10_TRAIN_FILE)\n current_data = current_data[:,0:NUM_POINT,:]\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n current_label = np.squeeze(current_label)\n X_train, y_train = current_data, current_label\n\n current_data, current_label = provider.loadDataFile(MODELNET10_TEST_FILE)\n current_data = current_data[:,0:NUM_POINT,:]\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n current_label = np.squeeze(current_label)\n X_test, y_test = current_data, current_label\n elif dataset == 'keypoint':\n current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, 
KEYPOINT_CHAIR_PATH, NUM_POINT)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_train, y_train = current_data, current_label\n\n current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]):\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_test, y_test = current_data, current_label\n elif dataset == 'keypoint_10class':\n current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n current_label[:, -10:] = np.arange(1, 11)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_train, y_train = current_data, current_label\n\n current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n current_label[:, -10:] = np.arange(1, 11)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]):\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_test, y_test = current_data, current_label\n elif dataset == \"keypointnet\":\n json_path = osp.join(KEYPOINTNET_PATH, \"annotations/all.json\")\n annots = json.load(open(json_path))\n X = []\n y = []\n for annot in annots:\n class_id = annot[\"class_id\"]\n model_id = annot[\"model_id\"]\n kpts = []\n for kpt in annot[\"keypoints\"]:\n kpts.append(kpt[\"xyz\"])\n pcd_path = osp.join(KEYPOINTNET_PATH, f\"pcds/{class_id}/{model_id}.pcd\")\n if os.path.exists(pcd_path):\n pcd = naive_read_pcd(pcd_path)\n pcd = pcd[0:NUM_POINT, :]\n else:\n continue\n if len(kpts) != 10:\n continue\n pcd = np.concatenate((pcd[:-10], kpts))\n label = np.zeros(NUM_POINT-10)\n label = np.concatenate((label, np.ones(10)))\n X.append(pcd)\n y.append(label)\n current_data = np.array(X)\n current_label = np.array(y)\n if False and shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_train, X_test, y_train, y_test = train_test_split(current_data, current_label, test_size=0.2, random_state=42, 
shuffle=shuffle)\n else:\n raise NotImplementedError()\n print(f'Dataset name: {dataset}')\n print(f'X_train: {X_train.shape}')\n print(f'X_test: {X_test.shape}')\n print(f'y_train: {y_train.shape}')\n print(f'y_test: {y_test.shape}')\n return X_train, X_test, y_train, y_test", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\",help = \"netCDF4 file to visualize\")\n args = parser.parse_args()\n\n print(\"Visualizing file %s\" % args.file)\n\n # open data in read mode\n data = netCDF4.Dataset(args.file, 'r')\n # surf_2d_slice(data)\n yz_slice(data, 50)", "def visualizemap(dna, map_view=\"linear\", feature_list=None, start=0, end=None,label_location=None, display_label=2, display_title=True, display_axis=True, fontsize=None, fontsize_nucl=None, \n tick_interval=\"auto\", labelcolor=\"k\", title=None, width_scale=\"auto\", height_scale=1.0, linebreak=None, seq=False, rcseq=False, diamater_scale=1.0, fig= None):\n\n if fontsize is None and map_view == \"linear\":\n fontsize = 12\n elif fontsize is None and map_view == \"circular\":\n fontsize = 10\n else:\n pass \n\n if title is None or title == \"\":\n display_titlee = False\n\n #if map_view == \"circular\":\n #feature_list.sort(key=lambda x:len(dna.printsequence(x.start, x.end)))\n \n standard_scale = 4000\n if map_view == \"circular\":\n figo, ax1, ax2= vc.visualize(dna, format=0, feature_list=feature_list, bottom=400 * diamater_scale, label_visible=display_label, fontsize=fontsize, \n title_visible=display_title, axis_visible=display_axis, tick_space=tick_interval, labelcolor=labelcolor, \n titlename=title, fig=fig)\n try:\n import patchworklib \n _patchworklib = True\n except:\n _patchworklib = False\n \n if _patchworklib == True:\n ax1 = patchworklib.cBrick(ax=ax1)\n ax2 = patchworklib.Brick(ax=ax2)\n if fig == patchworklib.Brick._figure or fig is None:\n return patchworklib.Bricks({ax1.get_label():ax1, ax2.get_label():ax2}) \n else:\n return figo\n else:\n return figo\n else:\n if feature_list is None:\n feature_list = dna.dnafeatures\n figo, ax = vl.visualize(dna, start=start, end=end, feature_list=feature_list, wrap_width=linebreak, annotation_loc=label_location, unvisible_types=[\"source\"], \n visible_types=[], enlarge_w=width_scale, enlarge_h=height_scale, fontsize=fontsize, fontsize_nucl=fontsize_nucl, with_seq=seq, with_seqr=rcseq, nucl_char=None, nucl_color_dict=None, \n label_visible=display_label, scale=\"fix\", title_visible=display_title, axis_visible=display_axis, tick_space=tick_interval, \n labelcolor=labelcolor, titlename=title, fig=fig)\n try:\n import patchworklib \n _patchworklib = True\n except:\n _patchworklib = False\n \n if _patchworklib == True:\n if fig == patchworklib.Brick._figure or fig is None:\n return ax\n else:\n return figo \n else:\n return figo", "def pangolin_draw(points):\n display.setup()\n\n while not display.should_quit():\n display.init_frame()\n for idx, pose in enumerate(CAMERA_POSES):\n is_last_pose = idx == len(CAMERA_POSES) - 1\n homogenous_pose = np.vstack((pose, [0, 0, 0, 1]))\n display.draw_camera(homogenous_pose, (0.0, 1.0, 1.0 if is_last_pose else 0.0))\n\n display.draw_points(points)\n\n display.finish_frame()", "def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 
1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()", "def plot_lda_2d(self):\n\n # Initialize LDA in 3D, fit on data.\n lda = LinearDiscriminantAnalysis(n_components=2)\n lda_data = lda.fit(self.train_data[self.feature_names], \n self.train_data[self.outcome_name]).transform(self.train_data[self.feature_names])\n print(\"Explained variance with 2 components: \", lda.explained_variance_ratio_)\n\n # Label based on outcome (1-5).\n lda_x = lda_data[:,0]\n lda_y = lda_data[:,1]\n labels = self.train_data[self.outcome_name].values\n unique_labels = np.unique(labels)\n colors = {1:'tab:purple', 2:'tab:orange', 3:'tab:green', 4:'tab:red', 5:'tab:blue'}\n \n # Define the graph.\n fig = plt.figure(figsize=(10,8))\n ax = plt.subplot()\n\n for l in unique_labels:\n i = np.where(labels==l)\n ax.scatter(lda_x[i], lda_y[i], c=colors[l], alpha=0.2)\n\n plt.xlabel(\"LDA 1\",fontsize=14)\n plt.ylabel(\"LDA 2\",fontsize=14)\n plt.legend(unique_labels)\n ax.set_title('LDA fit on 5000 dataset, applied on 5000 dataset', fontsize=14)\n\n plt.show()\n plt.savefig(r'data_analysis\\lda2d_' + self.file_name + '.png', \n facecolor=fig.get_facecolor(), bbox_inches='tight')\n return lda", "def on_clicked_point(self, clicked_point_msg):\n\n print \"received a /clicked_point message . . . visualizing\"\n pos = clicked_point_msg.point\n x, y, z = pos.x, pos.y, pos.z\n\n marker = visualization_msgs.msg.Marker()\n marker.header.frame_id = \"base\"\n marker.header.stamp = rospy.Time.now()\n marker.ns = \"clicked_point\"\n marker.id = 0\n marker.type = visualization_msgs.msg.Marker.SPHERE\n marker.action = visualization_msgs.msg.Marker.ADD\n marker.pose.position.x = x\n marker.pose.position.y = y\n marker.pose.position.z = z\n\n marker.pose.orientation.x = 0.0\n marker.pose.orientation.y = 0.0\n marker.pose.orientation.z = 0.0\n marker.pose.orientation.w = 1.0\n marker.scale.x = 0.03\n marker.scale.y = 0.03\n marker.scale.z = 0.03\n marker.color.a = 1.0\n marker.color.r = 1.0\n marker.color.g = 0.0\n marker.color.b = 0.0\n\n # hack to get around director funny business\n for i in xrange(0, 5):\n self.rviz_marker_publisher.publish(marker)\n rospy.sleep(0.02)", "def volcano_plotter():\n print(\"this is volcano plotter\")\n from math import log\n with open(\"../bob/processed/24h_bobdata_ed2_volcano.csv\", \"w\") as outF:\n outF.write(\"Gene log2FoldChange pvalue\\n\")\n with open(\"../bob/processed/24h_bobdata_ed2.csv\", \"r\") as inpF:\n skipFlag = True\n missCount = 1\n for inpLine in inpF:\n if skipFlag:\n skipFlag = False\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n try:\n curLine.append(float(inpI.strip(\"\\\"\\n \")))\n except ValueError:\n curLine.append(inpI.strip(\"\\\"\\n \")) # by this point, each line in the entry file is processed into a neat list\n if curLine[2] == \"\": # if no gene name is given, just add a placeholder\n curLine[2] = \"Noname\" + str(missCount)\n missCount += 1\n # calculate log2foldChange here:\n try:\n FAvg = (curLine[4] + curLine[5] + curLine[6])/3.0 # KO\n SAvg = (curLine[7] + curLine[8] + curLine[9])/3.0 # WT\n except TypeError:\n print(curLine)\n raise\n logFoldChange = log(SAvg/FAvg,2) # so positive numbers are more abundant in the wt cells, negatives number in the KO, at least for the 24H bobdata file\n outF.write(curLine[2] + \" \" + str(logFoldChange) + \" \" + str(curLine[10]) + \"\\n\") # write out results to file", "def 
gcs_analysis(detrended_dem, zs, xs_lengths, xs_spacing, analysis, clip_poly='', stage_plots=False, nest_plots=False):\n if not analysis:\n print('Extract GCS series...' )\n extract_gcs(detrended_dem, zs, xs_lengths, xs_spacing, clip_poly=clip_poly)\n print('Done')\n elif stage_plots and not nest_plots:\n print('Stage plots')\n elif stage_plots and nest_plots:\n print('Both plots')\n elif not stage_plots and nest_plots:\n print('Nest plots')\n\n print(stage_plots)\n print(nest_plots)\n print('In the gcs function')", "def plot_embedding_lda(features, labels):\n\n import bob.learn.linear\n import matplotlib.pyplot as mpl\n\n colors = ['#FF0000', '#FFFF00', '#FF00FF', '#00FFFF', '#000000',\n '#AA0000', '#AAAA00', '#AA00AA', '#00AAAA', '#330000']\n n_classes = max(labels)+1\n\n\n # Training PCA\n trainer = bob.learn.linear.FisherLDATrainer(use_pinv=True)\n lda_features = []\n for i in range(n_classes):\n indexes = numpy.where(labels == i)[0]\n lda_features.append(features[indexes, :].astype(\"float64\"))\n\n machine, lamb = trainer.train(lda_features)\n\n #import ipdb; ipdb.set_trace();\n\n\n # Getting the first two most relevant features\n projected_features = machine(features.astype(\"float64\"))[:, 0:2]\n\n # Plotting the classes\n fig = mpl.figure()\n\n for i in range(n_classes):\n indexes = numpy.where(labels == i)[0]\n\n selected_features = projected_features[indexes,:]\n mpl.scatter(selected_features[:, 0], selected_features[:, 1],\n marker='.', c=colors[i], linewidths=0, label=str(i))\n mpl.legend()\n return fig", "def visualize(houses:pd.DataFrame) -> None:\n #price_distribution(houses)\n #prop_types(houses)\n #zip_code(houses)\n #year_built(houses)\n #bed_bath(houses)\n return", "def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')", "def __init__(self, dataset: ds.Dataset, settings):\r\n self.dataset = dataset\r\n self.settings = settings\r\n\r\n self.visualizer = visualizer.Visualizer()", "def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" 
#####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()", "def make_photom_catalog_uvis(data, filt, origin=''):\n\tfnarr = [data[key]['filename'] for key in data.keys()]\n\t# Make sure filename does not include path.\n\tif '/' in fnarr[0]:\n\t if 'temp_lacos' in fnarr[0]:\n\t for i in range(len(fnarr)):\n\t file_name = fnarr[i].split('temp_lacos/')[1]\n\t fnarr[i] = file_name\n\t else:\n\t for i in range(len(fnarr)):\n\t file_name = (fnarr[i].split('/'))[len(fnarr[i].split('/'))-1]\n\t fnarr[i] = file_name\n\t\n\tamparr = [data[key]['amp'] for key in data.keys()]\n\tshutarr = [data[key]['shutter'] for key in data.keys()]\n\tmjdarr = [data[key]['mjd_avg'] for key in data.keys()]\n\tmjddeltarr = [data[key]['mjd_deltat'] for key in data.keys()]\n\tchiparr = [data[key]['chip'] for key in data.keys()]\n\taxis1arr = [data[key]['axis1'] for key in data.keys()]\n\taxis2arr = [data[key]['axis2'] for key in data.keys()]\n\txcarr = [data[key]['xc'] for key in data.keys()]\n\tycarr = [data[key]['yc'] for key in data.keys()]\n\txcparr = [data[key]['xcp'] for key in data.keys()]\n\tycparr = [data[key]['ycp'] for key in data.keys()]\n\tbackarr = [data[key]['background'] for key in data.keys()]\n\tbackrmsarr = [data[key]['background_rms'] for key in data.keys()]\n\texptimearr = [data[key]['exptime'] for key in data.keys()]\n\tf1 = [data[key]['flux'][0] for key in data.keys()]\n\tf2 = [data[key]['flux'][1] for key in data.keys()]\n\tf3 = [data[key]['flux'][2] for key in data.keys()]\n\tf4 = [data[key]['flux'][3] for key in data.keys()]\n\tf5 = [data[key]['flux'][4] for key in data.keys()]\n\tf6 = [data[key]['flux'][5] for key in data.keys()]\n\tf7 = [data[key]['flux'][6] for key in data.keys()]\n\tf8 = [data[key]['flux'][7] for key in data.keys()]\n\tf9 = [data[key]['flux'][8] for key in data.keys()]\n\tf10 = [data[key]['flux'][9] for key in data.keys()]\n\tf12 = [data[key]['flux'][10] for key in data.keys()]\n\tf14 = [data[key]['flux'][11] for key in data.keys()]\n\tf16 = [data[key]['flux'][12] for key in data.keys()]\n\tf18 = [data[key]['flux'][13] for key in data.keys()]\n\tf20 = [data[key]['flux'][14] for key in data.keys()]\n\tf24 = [data[key]['flux'][15] for key in data.keys()]\n\tf28 = [data[key]['flux'][16] for key in data.keys()]\n\tf32 = [data[key]['flux'][17] for key in data.keys()]\n\tf36 = [data[key]['flux'][18] for key in data.keys()]\n\tf40 = [data[key]['flux'][19] for key in data.keys()]\n\tf45 = [data[key]['flux'][20] for key in data.keys()]\n\tf50 = [data[key]['flux'][21] for key in data.keys()]\n\tf55 = [data[key]['flux'][22] for key in data.keys()]\n\tf60 = [data[key]['flux'][23] for key in data.keys()]\n\tf65 = 
[data[key]['flux'][24] for key in data.keys()]\n\tf70 = [data[key]['flux'][25] for key in data.keys()]\n \n\tm1 = [data[key]['mag'][0] for key in data.keys()]\n\tm2 = [data[key]['mag'][1] for key in data.keys()]\n\tm3 = [data[key]['mag'][2] for key in data.keys()]\n\tm4 = [data[key]['mag'][3] for key in data.keys()]\n\tm5 = [data[key]['mag'][4] for key in data.keys()]\n\tm6 = [data[key]['mag'][5] for key in data.keys()]\n\tm7 = [data[key]['mag'][6] for key in data.keys()]\n\tm8 = [data[key]['mag'][7] for key in data.keys()]\n\tm9 = [data[key]['mag'][8] for key in data.keys()]\n\tm10 = [data[key]['mag'][9] for key in data.keys()]\n\tm12 = [data[key]['mag'][10] for key in data.keys()]\n\tm14 = [data[key]['mag'][11] for key in data.keys()]\n\tm16 = [data[key]['mag'][12] for key in data.keys()]\n\tm18 = [data[key]['mag'][13] for key in data.keys()]\n\tm20 = [data[key]['mag'][14] for key in data.keys()]\n\tm24 = [data[key]['mag'][15] for key in data.keys()]\n\tm28 = [data[key]['mag'][16] for key in data.keys()]\n\tm32 = [data[key]['mag'][17] for key in data.keys()]\n\tm36 = [data[key]['mag'][18] for key in data.keys()]\n\tm40 = [data[key]['mag'][19] for key in data.keys()]\n\tm45 = [data[key]['mag'][20] for key in data.keys()]\n\tm50 = [data[key]['mag'][21] for key in data.keys()]\n\tm55 = [data[key]['mag'][22] for key in data.keys()]\n\tm60 = [data[key]['mag'][23] for key in data.keys()]\n\tm65 = [data[key]['mag'][24] for key in data.keys()]\n\tm70 = [data[key]['mag'][25] for key in data.keys()]\n\t\n\tm1_err = [data[key]['merr'][0] for key in data.keys()]\n\tm2_err = [data[key]['merr'][1] for key in data.keys()]\n\tm3_err = [data[key]['merr'][2] for key in data.keys()]\n\tm4_err = [data[key]['merr'][3] for key in data.keys()]\n\tm5_err = [data[key]['merr'][4] for key in data.keys()]\n\tm6_err = [data[key]['merr'][5] for key in data.keys()]\n\tm7_err = [data[key]['merr'][6] for key in data.keys()]\n\tm8_err = [data[key]['merr'][7] for key in data.keys()]\n\tm9_err = [data[key]['merr'][8] for key in data.keys()]\n\tm10_err = [data[key]['merr'][9] for key in data.keys()]\n\tm12_err = [data[key]['merr'][10] for key in data.keys()]\n\tm14_err = [data[key]['merr'][11] for key in data.keys()]\n\tm16_err = [data[key]['merr'][12] for key in data.keys()]\n\tm18_err = [data[key]['merr'][13] for key in data.keys()]\n\tm20_err = [data[key]['merr'][14] for key in data.keys()]\n\tm24_err = [data[key]['merr'][15] for key in data.keys()]\n\tm28_err = [data[key]['merr'][16] for key in data.keys()]\n\tm32_err = [data[key]['merr'][17] for key in data.keys()]\n\tm36_err = [data[key]['merr'][18] for key in data.keys()]\n\tm40_err = [data[key]['merr'][19] for key in data.keys()]\n\tm45_err = [data[key]['merr'][20] for key in data.keys()]\n\tm50_err = [data[key]['merr'][21] for key in data.keys()]\n\tm55_err = [data[key]['merr'][22] for key in data.keys()]\n\tm60_err = [data[key]['merr'][23] for key in data.keys()]\n\tm65_err = [data[key]['merr'][24] for key in data.keys()]\n\tm70_err = [data[key]['merr'][25] for key in data.keys()]\n \n\ttt = {'#filename':fnarr, 'amp':amparr, 'shutter':shutarr, \\\n\t 'mjd_avg':mjdarr, 'mjd_deltat':mjddeltarr, 'chip':chiparr, \\\n\t 'axis1':axis1arr, 'axis2':axis2arr,'xc':xcarr, 'yc':ycarr, \\\n\t 'xcp':xcparr, 'ycp':ycparr, 'background':backarr, \\\n\t 'background_rms':backrmsarr, 'exptime':exptimearr, \\\n 'f1':f1, 'f2':f2, 'f3':f3,'f4':f4,'f5':f5,'f6':f6,'f7':f7,'f8':f8,\\\n 'f9':f9,'f10':f10,'f12':f12,'f14':f14,'f16':f16,'f18':f18,'f20':f20,\\\n 
'f24':f24,'f28':f28,'f32':f32,'f36':f36,'f40':f40,'f45':f45,\\\n 'f50':f50,'f55':f55,'f60':f60,'f65':f65,'f70':f70,'m1':m1, 'm2':m2, \\\n 'm3':m3,'m4':m4,'m5':m5,'m6':m6,'m7':m7,'m8':m8,'m9':m9,'m10':m10,\\\n 'm12':m12,'m14':m14,'m16':m16,'m18':m18,'m20':m20,'m24':m24,\\\n 'm28':m28,'m32':m32,'m36':m36,'m40':m40,'m45':m45,'m50':m50,\\\n 'm55':m55,'m60':m60,'m65':m65,'m70':m70,'m1err':m1_err, \\\n 'm2err':m2_err, 'm3err':m3_err,'m4err':m4_err,'m5err':m5_err,\\\n 'm6err':m6_err,'m7err':m7_err,'m8err':m8_err,'m9err':m9_err,\\\n 'm10err':m10_err,'m12err':m12_err,'m14err':m14_err,'m16err':m16_err,\\\n 'm18err':m18_err,'m20err':m20_err,'m24err':m24_err,'m28err':m28_err,\\\n 'm32err':m32_err,'m36err':m36_err,'m40err':m40_err,'m45err':m45_err,\\\n 'm50err':m50_err,'m55err':m55_err,'m60err':m60_err,'m65err':m65_err,\\\n 'm70err':m70_err}\n\n\tascii.write(tt, origin+filt+'_photcat.dat', \\\n\t names=['#filename','amp','shutter','mjd_avg','mjd_deltat',\\\n\t 'chip','axis1','axis2','xc','yc','xcp','ycp',\\\n\t 'background','background_rms','exptime', \\\n 'f1','f2','f3','f4','f5','f6','f7','f8','f9','f10',\\\n 'f12','f14','f16','f18','f20','f24','f28','f32','f36',\\\n 'f40','f45','f50','f55','f60','f65','f70',\\\n 'm1','m2','m3','m4','m5','m6','m7','m8','m9','m10',\\\n 'm12','m14','m16','m18','m20','m24','m28','m32','m36',\\\n 'm40','m45','m50','m55','m60','m65','m70','m1err',\\\n 'm2err','m3err','m4err','m5err','m6err','m7err',\\\n 'm8err','m9err','m10err','m12err','m14err','m16err',\\\n 'm18err','m20err','m24err','m28err','m32err','m36err',\\\n 'm40err','m45err','m50err','m55err','m60err','m65err',\\\n 'm70err'], \\\n formats={'#filename':'%s','amp':'%s','shutter':'%s',\\\n 'mjd_avg':'%9.4f','mjd_deltat':'%6.4f','chip':'%i',\\\n 'axis1':'%i','axis2':'%i','xc':'%8.3f','yc':'%8.3f',\\\n 'xcp':'%8.3f','ycp':'%8.3f', 'background':'%0.5f',\\\n 'background_rms':'%0.5f', 'exptime':'%0.2f', \\\n 'f1':'%0.3f', 'f2':'%0.3f','f3':'%0.3f','f4':'%0.3f',\\\n 'f5':'%0.3f','f6':'%0.3f','f7':'%0.3f','f8':'%0.3f',\\\n 'f9':'%0.3f','f10':'%0.3f','f12':'%0.3f',\\\n 'f14':'%0.3f','f16':'%0.3f','f18':'%0.3f',\\\n 'f20':'%0.3f','f24':'%0.3f','f28':'%0.3f',\\\n 'f32':'%0.3f','f36':'%0.3f','f40':'%0.3f',\\\n 'f45':'%0.3f','f50':'%0.3f','f55':'%0.3f',\\\n 'f60':'%0.3f','f65':'%0.3f','f70':'%0.3f',\\\n 'm1':'%0.3f','m2':'%0.3f','m3':'%0.3f','m4':'%0.3f',\\\n 'm5':'%0.3f','m6':'%0.3f','m7':'%0.3f','m8':'%0.3f',\\\n 'm9':'%0.3f','m10':'%0.3f','m12':'%0.3f',\\\n 'm14':'%0.3f','m16':'%0.3f','m18':'%0.3f',\\\n 'm20':'%0.3f','m24':'%0.3f','m28':'%0.3f',\\\n 'm32':'%0.3f','m36':'%0.3f','m40':'%0.3f',\\\n 'm45':'%0.3f','m50':'%0.3f','m55':'%0.3f',\\\n 'm60':'%0.3f','m65':'%0.3f','m70':'%0.3f', \\\n 'm1err':'%0.3f', 'm2err':'%0.3f','m3err':'%0.3f',\\\n 'm4err':'%0.3f','m5err':'%0.3f','m6err':'%0.3f',\\\n 'm7err':'%0.3f','m8err':'%0.3f','m9err':'%0.3f',\\\n 'm10err':'%0.3f','m12err':'%0.3f','m14err':'%0.3f',\\\n 'm16err':'%0.3f','m18err':'%0.3f','m20err':'%0.3f',\\\n 'm24err':'%0.3f','m28err':'%0.3f','m32err':'%0.3f',\\\n 'm36err':'%0.3f','m40err':'%0.3f','m45err':'%0.3f',\\\n 'm50err':'%0.3f','m55err':'%0.3f','m60err':'%0.3f',\\\n 'm65err':'%0.3f','m70err':'%0.3f'})" ]
[ "0.6512084", "0.6371263", "0.6321242", "0.6314276", "0.6305938", "0.6247466", "0.6047218", "0.60251385", "0.5953147", "0.59473276", "0.5941391", "0.592496", "0.59040606", "0.5794781", "0.5758018", "0.5744503", "0.57257426", "0.570993", "0.5699411", "0.5648155", "0.5635541", "0.5606605", "0.56042576", "0.55811846", "0.5572101", "0.5556901", "0.55551714", "0.5549349", "0.55487186", "0.5545881", "0.55269665", "0.5521714", "0.5501523", "0.5482636", "0.5482193", "0.54782903", "0.5464992", "0.5464378", "0.5454902", "0.54540384", "0.5448684", "0.54333234", "0.54306203", "0.542788", "0.5410889", "0.54101884", "0.540644", "0.53814375", "0.5352965", "0.53525275", "0.53513896", "0.5348314", "0.5345702", "0.5343005", "0.5336661", "0.532894", "0.53176284", "0.5311283", "0.5309192", "0.53035754", "0.5293912", "0.5292802", "0.5281366", "0.5279924", "0.52740973", "0.5269596", "0.5264319", "0.52626836", "0.5261037", "0.52608806", "0.52583987", "0.52513593", "0.52483207", "0.52471", "0.5246682", "0.5234482", "0.5225106", "0.52244127", "0.5213012", "0.5203135", "0.51994926", "0.5196368", "0.518977", "0.518976", "0.5189372", "0.5188447", "0.5180899", "0.5178761", "0.5176789", "0.5175324", "0.51655287", "0.51654667", "0.51587707", "0.5158564", "0.515587", "0.5153988", "0.51534384", "0.51469016", "0.51442343", "0.5143039" ]
0.5327644
56
Visualise and record range-Doppler matrices with projected points
def visualise_points_on_rd(rd_matrix, path, points, range_res, doppler_res):
    rd_img = SignalVisualizer(rd_matrix).get_image
    for point in points:
        range_coord = (point[0] / range_res).astype(int)
        doppler_coord = (point[1] / doppler_res).astype(int)
        if point[1] < 0:
            doppler_coord += int(rd_matrix.shape[1]/2 - 1)
        else:
            doppler_coord += int(rd_matrix.shape[1]/2)
        rd_img[range_coord*4:(range_coord*4+4),
               doppler_coord*4:(doppler_coord*4+4)] = [0., 0., 0.]
    plt.imsave(path, rd_img)
    plt.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n = 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.", "def vis_d():\n \n data_gen = generator(fixed_noise)\n# loss = d_loss(discriminator(data_gen), discriminator(grid))\n loss = g_loss(discriminator(grid))\n loss.backward()\n \n grads = - grid.grad.data.numpy()\n grid.grad.data *= 0 \n plt.quiver(X_grid, Y_grid, grads[:, 0], grads[:, 1], color='black',alpha=0.9)", "def do_intensity_projection(points, proj_W , proj_H, proj_fov_up, proj_fov_down, fn, idx):\n\n # print(points.shape)\n\n points = points[points.any(axis=1)]\n\n proj_range = np.zeros((proj_H, proj_W),\n dtype=np.float64)\n\n # unprojected range (list of depths for each point)\n unproj_range = np.zeros((0, 1), dtype=np.float32)\n\n # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data)\n proj_xyz = np.full((proj_H, proj_W, 4), -1,\n dtype=np.float32)\n\n # projected remission - [H,W] intensity (-1 is no data)\n proj_remission = np.full((proj_H, proj_W), -1,\n dtype=np.float32)\n\n # projected index (for each pixel, what I am in the pointcloud)\n # [H,W] index (-1 is no data)\n proj_idx = np.full((proj_H, proj_W), -1,\n dtype=np.int32)\n\n # for each point, where it is in the range image\n proj_x = np.zeros((0, 1), dtype=np.int32) # [m, 1]: x\n proj_y = np.zeros((0, 1), dtype=np.int32) # [m, 1]: y\n\n # mask containing for each pixel, if it contains a point or not\n proj_mask = np.zeros((proj_H, proj_W),\n dtype=np.int32) # [H,W] mask\n\n\n\n\n # laser parameters\n fov_up = proj_fov_up / 180.0 * np.pi # field of view up in rad\n fov_down = proj_fov_down / 180.0 * np.pi # field of view down in rad\n fov = abs(fov_down) + abs(fov_up) # get field of view total in rad\n\n\n \n depth = np.linalg.norm(points[:,:3], 2, axis=1)\n\n # print(points[:10,:])\n \n\n # get scan components\n scan_x = points[:, 0]\n scan_y = points[:, 1]\n scan_z = points[:, 2]\n\n # get angles of all points\n yaw = -np.arctan2(scan_y, scan_x) \n pitch = np.arcsin(scan_z / depth)\n\n # get projections in image coords\n proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0]\n proj_y = 1.0 - (pitch + abs(fov_down)) / fov # in [0.0, 1.0]\n\n proj_x = np.nan_to_num(proj_x)\n\n proj_y = np.nan_to_num(proj_y)\n # scale to image size using angular resolution\n proj_x *= proj_W # in [0.0, W]\n proj_y *= proj_H # in [0.0, H]\n\n \n \n\n # round and clamp for use as index\n proj_x = np.floor(proj_x)\n proj_x = np.minimum(proj_W - 1, proj_x)\n proj_x = np.maximum(0, 
proj_x).astype(np.int32) # in [0,W-1]\n proj_x = np.copy(proj_x) # store a copy in orig order\n\n proj_y = np.floor(proj_y)\n proj_y = np.minimum(proj_H - 1, proj_y)\n proj_y = np.maximum(0, proj_y).astype(np.int32) # in [0,H-1]\n\n proj_y = np.copy(proj_y) # stope a copy in original order\n\n\n # # copy of depth in original order\n # unproj_range = np.copy(depth)\n\n # indices = np.arange(depth.shape[0])\n # order = np.argsort(depth)[::-1]\n # depth = depth[order]\n # indices = indices[order]\n # points = points[order]\n\n # proj_y = proj_y[order]\n # proj_x = proj_x[order]\n \n\n if DATASET_TYPE == \"kitti\":\n intensities = points[:,3]\n print(\"kitti\")\n # intensities = np.minimum(intensities, 1000)\n # i_min = intensities.min()\n # i_max = intensities.max()\n # intensities = (intensities - i_min)/(i_max - i_min)\n\n\n\n if DATASET_TYPE == \"mulran\" or DATASET_TYPE == \"mulran2\":\n intensities = points[:,3]\n intensities = np.minimum(intensities, 1000)\n i_min = intensities.min()\n i_max = intensities.max()\n \n intensities = (intensities - i_min)/(i_max - i_min)\n\n if DATASET_TYPE == \"dso\":\n \n \n \n intensities = points[:,4]\n \n\n minval = np.percentile(intensities, 2)\n maxval = np.percentile(intensities, 98)\n intensities = np.clip(intensities, minval, maxval)\n # intensities = np.maximum(intensities, 5000)\n # intensities = np.sqrt(intensities)\n\n \n\n\n \n i_min = intensities.min()\n i_max = intensities.max()\n\n intensities = (intensities - i_min)/(i_max - i_min)\n\n \n\n\n \n\n \n\n \n \n\n\n \n \n \n \n \n pixel_tracker = {}\n pc_tracker = {}\n # print(proj_x.shape)\n # print(scan_x.shape)\n\n \n proj_3d_corres = np.zeros((proj_H, proj_W, 3),\n dtype=np.float64)\n\n # print(proj_x[:20])\n # print(proj_y[:70])\n \n \n for i in range(proj_x.shape[0]):\n x_val = proj_x[i]\n y_val = proj_y[i]\n\n \n\n if proj_range[y_val, x_val] != 0:\n continue\n\n \n \n intensity = intensities[i]\n \n \n \n \n proj_range[y_val, x_val] = intensity\n \n proj_3d_corres[y_val,x_val, :] = np.array([scan_x[i], scan_y[i], scan_z[i]])\n \n\n\n \n proj_range *= 255\n\n\n \n \n \n \n proj_range = np.array(proj_range, dtype=np.uint8)\n\n \n newPicPath = None\n\n\n \n\n\n img = Image.fromarray(proj_range, 'L')\n pc_name = fn.split('.')[0]\n newPicPath = os.path.join(CURRENT_DIR, \"intensity_images\", \"mulran_\" + (str(idx)) + \".png\")\n img.save(newPicPath)\n\n\n return newPicPath, proj_3d_corres, proj_range", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def plot_figure4(df, left_colNames, right_colNames, dvmin=-60, dvmax=61, step=10, show=False, write_to=None):\n\tleft = df[left_colNames]\n\tright = df[right_colNames]\n\tleft.rename(columns=dict(zip(left_colNames, range(1,7))), inplace=True)\n\tright.rename(columns=dict(zip(right_colNames, range(1,7))), inplace=True)\n\n\t# item-wise matrix operation, left minus 
right. positive if left value is greater\n\tdiff = left.combine(right, lambda l, r: l-r) \n\t# count how many times left >= right\n\tdiff['count'] = diff.gt(0).sum(axis=1)\n\t# sum(left) - sum(right)\n\tdiff['diff'] = left.sum(axis=1)-right.sum(axis=1)\n\t# 1 chose left, 0 chose right. Flipped 'side_chosen' column\n\tdiff['choice'] = np.logical_xor(df['side_chosen'],1).astype(int)\n\tdiff = diff.sort_values(by=['diff'], ascending=True)\n\n\t# Fig 4A Blue line\n\t# calculate prob(choose left) for each bucket when left >= right MORE than three times \n\t# left win times: 6/0, 5/1, 4/2. tie 3/3 ignored\n\twgt3 = diff.loc[diff['count'] > 3]\n\twgt3_res = group(wgt3, dvmin, dvmax, step)\n\n\t# Fig 4A Red line\n\t# calculate prob(choose left) for each bucket when left >= right FEWER than three times \n\t# left win times: 0/6, 1/5, 2/4. tie 3/3 ignored\n\twlt3 = diff.loc[diff['count'] < 3]\n\twlt3_res = group(wlt3, dvmin, dvmax, step)\n\n\tif show:\n\t\tx = np.array([x for x in range(dvmin, dvmax, step)])\n\t\tfig = go.Figure()\n\t\tfig.add_trace(go.Scatter(x=x, y=wgt3_res, mode='lines', name='W>3', line=dict(color='blue')))\n\t\tfig.add_trace(go.Scatter(x=x, y=wlt3_res, mode='lines', name='W<3', line=dict(color='red')))\n\t\tfig.show()\n\tif write_to is not None:\n\t\tfig.write_image(write_to)\n\treturn wgt3_res, wlt3_res", "def plot_2D_edp(self, xmin=-100, xmax=100, zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n #Y = rho_xz[:,1]\n #Z = rho_xz[:,2]\n X.shape = (N, N)\n Y.shape = (N, N)\n Z.shape = (N, N)\n plt.figure()\n plt.contourf(X, Y, Z)", "def plot_map_1d(self, opt, point, variable, constraints, axes):\n self.optimisation = self.config.build_bump_surrogate_model[\"staged_optimisation\"][opt]\n grid = self.grid_list[opt]\n values = self.value_lists[opt]\n map_ = self.map_list[opt]\n seed_list = self.seed_list[opt]\n inverse_map = self.inverse_map_list[opt]\n limits = self.get_limits()\n fields = self.get_fields()\n field_index = fields.index(point)\n track_x_list = grid.coord_vector(field_index)\n inv_x_list = []\n minim_y_list = []\n minim_x_list = []\n track_off_list = [grid.coord_vector(i) for i in range(4) if i != field_index]\n track_y_list = []\n min_y_list = []\n max_y_list = []\n fit_x_list = []\n fit_y_list = []\n for i, x0 in enumerate(track_x_list[:-1]):\n x1 = track_x_list[1:][i]\n fit_x_list += [x0+(x1-x0)/10.0*i for i in range(10)]\n fit_x_list.append(x1)\n for x in fit_x_list:\n fit_point = []\n for field in fields:\n if field in constraints:\n fit_point.append(constraints[field])\n elif field == point:\n fit_point.append(x)\n print(fit_point)\n fit_value = map_.function(fit_point)\n if inverse_map == None:\n inv_x_list.append(0.)\n else:\n inverse_point = inverse_map.function(fit_value)\n inv_x_list.append(inverse_point[field_index])\n minimiser_point = minimiser.get_multipoint(map_, seed_list, fit_value, limits)\n minimiser_point = [minimiser.get_point(map_, seed_list, fit_value, limits)]\n for test_point in minimiser_point:\n minim_x_list.append(test_point[field_index])\n minim_y_list.append(fit_value[variable])\n\n if x in track_x_list:\n track_y_list.append(fit_value[variable])\n min_y_list.append(fit_value[variable])\n max_y_list.append(fit_value[variable])\n for a_list_0 in 
track_off_list[0]:\n for a_list_1 in track_off_list[0]:\n for a_list_2 in track_off_list[0]:\n fit_point = [a_list_0, a_list_1, a_list_2]\n fit_point.insert(field_index, x)\n value = map_.function(fit_point)\n min_y_list[-1] = min(min_y_list[-1], value[variable])\n max_y_list[-1] = max(max_y_list[-1], value[variable])\n fit_y_list.append(fit_value[variable])\n\n print(\"Verse \", fit_x_list) \n print(\"Inverse\", inv_x_list) \n print(\"Minim \", minim_x_list) \n x_axis = point.replace(\"__\", \"\").replace(\"_\", \" \")\n y_axis = [\"$x$ [mm]\", \"$p_{x}$ [MeV/c]\", \"$y$ [mm]\", \"$p_{y}$ [MeV/c]\"][variable]\n axes.plot(fit_x_list, fit_y_list)\n #axes.plot(inv_x_list, fit_y_list, linestyle=\"-\")\n axes.plot(minim_x_list, minim_y_list, linestyle=\" \", marker=\"x\")\n axes.plot(track_x_list, track_y_list, linestyle=\" \", marker=\"o\")\n axes.plot(track_x_list, min_y_list, linestyle=\" \", marker=\"o\")\n axes.plot(track_x_list, max_y_list, linestyle=\" \", marker=\"o\")\n axes.set_xlabel(x_axis+\" [T]\", fontsize=self.f_size)\n axes.set_ylabel(y_axis, fontsize=self.f_size)", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def show_grid(self) -> None:\n from pymol import cmd\n from PyQt5 import QtWidgets\n\n global x, y, z\n\n if self.input.count() > 0:\n # Get minimum and maximum dimensions of target PDB\n pdb = self.input.currentText()\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(pdb)\n\n # Get Probe Out value\n probe_out = self.probe_out.value()\n probe_out = round(probe_out - round(probe_out, 4) % round(0.6, 4), 1)\n\n # Prepare dimensions\n min_x = round(min_x - (min_x % 0.6), 1) - probe_out\n min_y = round(min_y - (min_y % 0.6), 1) - probe_out\n min_z = round(min_z - (min_z % 0.6), 1) - probe_out\n max_x = round(max_x - (max_x % 0.6) + 0.6, 1) + probe_out\n max_y = round(max_y - (max_y % 0.6) + 0.6, 1) + probe_out\n max_z = round(max_z - (max_z % 0.6) + 0.6, 1) + probe_out\n\n # Get center of each dimension (x, y, z)\n x = (min_x + max_x) / 2\n y = (min_y + max_y) / 2\n z = (min_z + max_z) / 2\n\n # Draw Grid\n self.draw_grid(min_x, max_x, min_y, max_y, min_z, max_z)\n else:\n QtWidgets.QMessageBox.critical(self, \"Error\", \"Select an input PDB!\")\n return", "def receptive_fields_visualization(W):\n W = W.cpu()\n \n hidden_dim = int(np.sqrt(W.shape[1]))\n side_dim = 10\n indices = [np.random.randint(0,W.shape[0]) for _ in range(side_dim**2)]\n \n fig = plt.figure(figsize=(10,10))\n for i in range(len(indices)):\n ax = fig.add_subplot(side_dim, side_dim, i+1, xticks = [], yticks = [])\n ax.imshow(W[i,:].view(hidden_dim, hidden_dim),cmap = 'gray')\n plt.subplots_adjust(wspace=0.01, hspace=0.01)\n #end\n \n plt.show()\n plt.close('all')", "def plot_1D_edp(self, start=(-10,25), end=(30,-20), N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([dist, tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X = rho[:,0]\n Y = rho[:,1]\n plt.figure()\n plt.plot(X, Y)", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n 
y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def matrices_and_bounds(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Running the experiment.\n result = bound.execute_script(input, must_print, False)\n\n #Storing output.\n store_output(result) #result = [P_MATRIX, R_MATRIX, P_BOUND, R_BOUND]", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, 
-4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def visualize_network_samples_grid(net, start=5, stop=100, step=10,\n f_ext=\"pdf\"):\n save_dir = make_memnet_checkpoint_dir(\n Path(\"plots/vis_memnet_samples/grid/\"), net)\n if not save_dir.exists():\n save_dir.makedirs()\n inds = list(product(range(start, stop, step), range(start, stop, step)))\n grid_width = len(range(start, stop, step))\n print(\"Loading plants.\")\n inputs, stim_vals = load_plants(\n net.image_width,\n net.trainingset_dir,\n layer_type=net.layer_type,\n normalize=True)\n print(\"Finished.\")\n fig, axes = plt.subplots(grid_width, grid_width, figsize=(50, 50))\n preds = []\n print(\"Making plots...\")\n for i in range(len(stim_vals)):\n if tuple(stim_vals[i]) in inds:\n print(\"Making \", stim_vals[i])\n preds.append(\n net.predict(inputs[i:i + 1], keep_session=True).reshape(\n net.image_width, net.image_width))\n grid_coord = tuple(stim_vals[i] / step)\n ax = axes[int(grid_coord[1]), int(grid_coord[0])]\n # ax = axes[tuple(stim_vals[i] / step)]\n ax.imshow(preds[-1], cmap=\"gray_r\")\n ax.set_xticklabels(\"\")\n ax.set_xticks([])\n ax.set_xticklabels(\"\")\n ax.set_yticks([])\n ax.set_yticklabels(\"\")\n plt.savefig(save_dir.joinpath('grid.' + f_ext))\n print(\"Saved to \", save_dir.joinpath('grid.' 
+ f_ext))\n return", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def show3(dlist,r=2,c=2,greyscale=False,output=False,samerange=True):\n\n#distrib.show3((d63[:128,:128,0]-1,d0[:128,:128,0]-1,N.log(d63[:128,:128,0]),d63ga[:128,:128,0]),greyscale=True)\n\n M.clf()\n\n fig = M.figure(figsize=(6.4, 6.4), dpi=100) \n axesarr=N.array([[0.01,0.51,0.4,0.4],\n [0.51,0.51,0.4,0.4],\n [0.01,0.01,0.4,0.4],\n [0.51,0.01,0.4,0.4]])\n\n print axesarr\n colorbax = 1.*axesarr\n print colorbax\n colorbax[:,2] = 0.*colorbax[:,2] + 0.03\n colorbax[:,0] += 0.4\n\n print colorbax\n\n if greyscale:\n colorscheme='binary'\n else:\n colorscheme='jet'\n\n # d63, d0, log d63, d63g\n titlearr=[r'$\\delta$',r'$\\delta_{\\rm initial}$',r'$\\log(1+\\delta)$',r'$\\delta_{\\rm Gauss}$']\n\n if (dlist[1] != None):\n min23 = min(min(dlist[2].flatten()),min(dlist[3].flatten()))\n max23 = max(max(dlist[2].flatten()),max(dlist[3].flatten()))\n\n max0 = max(dlist[1].flatten())\n min0 = min(dlist[1].flatten())\n\n initfact = min(max23/max0,min23/min0)\n print min23,max23, initfact\n\n sc = 0\n for d in dlist:\n if (d != None):\n M.axes(axesarr[sc])\n M.title(titlearr[sc],fontsize=23)\n if (sc > 1):\n print titlearr[sc]\n if (samerange):\n M.pcolor(d,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n elif (sc == 1):\n #print min(d.flatten()*initfact),max(d.flatten()*initfact)\n if (samerange):\n M.pcolor(d*initfact,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n# if (sc == 1):\n# M.colorbar(ticks=[-0.1,-0.05,0,0.05,0.1])\n# else:\n\n M.axis('tight')\n M.axis('equal')\n M.axis('tight')\n M.xticks([])\n M.yticks([])\n\n cax = M.axes(colorbax[sc])\n M.colorbar(cax=cax)\n\n sc += 1\n\n #M.savefig('showdens.eps',dpi=8)\n #M.gcf().set_size_inches((6.4,6.4))\n #M.gcf().set_size_inches((15.,12.))\n if (output):\n if greyscale:\n M.savefig('showdens_grey.png',dpi=100)\n M.savefig('showdens_grey.pdf')\n else:\n fig.savefig('showdens.png',dpi=100)\n M.savefig('showdens.pdf')\n\n #M.show()", "def visualize_network_samples_grid_1D(net, start=5, stop=100, step=10,\n f_ext=\"pdf\"):\n save_dir = make_memnet_checkpoint_dir(\n Path(\"plots/vis_memnet_samples/grid/\"), net)\n if not save_dir.exists():\n save_dir.makedirs()\n relevant_vals = range(start, stop, step)\n grid_width = len(relevant_vals)\n print(\"Loading plants.\")\n inputs0, stim_vals0 = load_plants(\n net.image_width,\n net.trainingset_dir,\n layer_type=net.layer_type,\n normalize=True)\n inputs0 = inputs0 / 255. 
# Normalize\n print(\"Finished.\")\n irrelevant_dim = int(net.input_distribution_dim == 0)\n if irrelevant_dim == 0:\n inds = list(product([50], relevant_vals))\n else:\n inds = list(product(relevant_vals, [50]))\n inputs_valid = inputs0[stim_vals0[:, irrelevant_dim] == 50]\n stim_vals = stim_vals0[stim_vals0[:, irrelevant_dim] == 50]\n fig, axes = plt.subplots(\n 1, grid_width, figsize=(50, 50 / len(relevant_vals)))\n preds = []\n inputs = []\n print(\"Making plots...\")\n for i in range(len(stim_vals)):\n if tuple(stim_vals[i]) in inds:\n print(\"Making \", stim_vals[i])\n inputs.append(inputs_valid[i])\n preds = net.predict(inputs, keep_session=True)\n for i in range(len(preds)):\n ax = axes[i]\n ax.imshow(\n preds[i:i + 1].reshape(net.image_width, net.image_width),\n cmap=\"gray_r\")\n ax.set_xticklabels(\"\")\n ax.set_xticks([])\n ax.set_xticklabels(\"\")\n ax.set_yticks([])\n ax.set_yticklabels(\"\")\n plt.savefig(save_dir.joinpath('grid.' + f_ext))\n print(\"Saved to \", save_dir.joinpath('grid.' + f_ext))\n return", "def figure_2d(\r\n self, interpolate_to_uniform: bool = True, solution_vector: bool = None\r\n ):\r\n self.mat_plot_2d.plot_mapper(\r\n mapper=self.mapper,\r\n visuals_2d=self.get_2d.via_mapper_for_source_from(mapper=self.mapper),\r\n interpolate_to_uniform=interpolate_to_uniform,\r\n pixel_values=solution_vector,\r\n auto_labels=AutoLabels(\r\n title=\"Pixelization Mesh (Image-Plane)\", filename=\"mapper\"\r\n ),\r\n )", "def plot_groupdq(self):\n my_plot_gdqs = self.ramp_dm.groupdq[0, :, self.ylim[0]:self.ylim[1], self.xlim[0]:self.xlim[1]]\n (z, y, x) = my_plot_gdqs.shape\n\n # plot the vectors\n fig, axs = plt.subplots(1, 1, figsize=(12, 8))\n axs.set_xlabel('time (s)', fontsize=15)\n axs.set_ylabel('GROUP_DQ', fontsize=15)\n axs.set_title(\"group_dq in {}\".format(self.ramp_file), fontsize=12)\n\n jump_area = np.zeros((y, x))\n for n in range(x):\n for m in range(y):\n if np.any(my_plot_gdqs[:, m, n] > 1):\n axs.plot(my_plot_gdqs[:, m, n], marker='.', markersize=0, linestyle='-', linewidth=0.5, c='r')\n if self.ramp_dm.pixeldq[m, n] == 1:\n jump_area[m, n] = 2\n else:\n jump_area[m, n] = 0\n else:\n axs.plot(my_plot_gdqs[:, m, n], marker='.', markersize=0, linestyle='-', linewidth=0.5, c='b')\n if self.ramp_dm.pixeldq[m, n] == 1:\n jump_area[m, n] = 2\n else:\n jump_area[m, n] = 1\n\n plt.tight_layout()\n dq_vector_name = 'group_dq_vectors.pdf'\n try:\n os.remove(dq_vector_name)\n except:\n pass\n fig.savefig(dq_vector_name, dpi=100)\n\n # location of jumps/do not use\n fig, axs = plt.subplots(1, 1, figsize=(12, 8))\n\n axs.imshow(jump_area, cmap='seismic_r', interpolation='nearest', origin='lower', vmin=0, vmax=2)\n axs.set_title('group_dq including bad pixels', fontsize=15)\n\n plt.tight_layout()\n dq_image_name = 'group_dq_image.pdf'\n try:\n os.remove(dq_image_name)\n except:\n pass\n fig.savefig(dq_image_name, dpi=100)", "def grid(iant,xgrid=[0],ygrid=[0],sleep=4):\n d=Carma(iant).drive()\n d.setOffset(xgrid[0],ygrid[0])\n time.sleep(sleep)\n time.sleep(sleep)\n for y in ygrid:\n for x in xgrid:\n print x,y\n d.setOffset(x,y)\n time.sleep(sleep)", "def get_gridded_parameters(q, xparam=\"x\", yparam=\"y\", zparam=\"z\"):\n plotParamDF = q[ [xparam, yparam, zparam] ]\n plotParamDF[xparam] = plotParamDF[xparam].tolist()\n plotParamDF[yparam] = np.round(plotParamDF[yparam].tolist(), 1)\n plotParamDF = plotParamDF.groupby( [xparam, yparam] ).mean().reset_index()\n plotParamDF = plotParamDF[ [xparam, yparam, zparam] ].pivot( xparam, yparam )\n x = 
plotParamDF.index.values\n y = plotParamDF.columns.levels[1].values\n X, Y = np.meshgrid( x, y )\n # Mask the nan values! pcolormesh can't handle them well!\n Z = np.ma.masked_where(\n np.isnan(plotParamDF[zparam].values),\n plotParamDF[zparam].values)\n return X,Y,Z", "def gate(self, *dim_ranges):\n relevant_data = self.get_points(*[r.dim for r in dim_ranges])\n mins = np.array([r.min for r in dim_ranges])\n maxes = np.array([r.max for r in dim_ranges])\n test1 = np.alltrue(relevant_data >= mins, axis=1)\n test2 = np.alltrue(relevant_data <= maxes, axis=1)\n final = np.logical_and(test1, test2) \n return DataTable(self.data[final], self.dims, self.legends, self.tags.copy())", "def buildGrid(self, plot=False):\r\n\r\n print(\"Constructing grid\")\r\n # print(\"Grid dims\", self.ne, self.nn, self.nz)\r\n # print(\"Num points\", 2*(self.ne+1)*(self.nn+1)*3, len(self.coords))\r\n\r\n # number of edges\r\n self.ndx = self.ne + 1\r\n self.ndy = self.nn + 1\r\n self.ndz = self.nz + 1\r\n\r\n # extract the triplets\r\n self.points = {}\r\n self.points[\"e\"] = self.coords[0::3]\r\n self.points[\"n\"] = self.coords[1::3]\r\n self.points[\"z\"] = self.coords[2::3]\r\n\r\n print('points e')\r\n print(self.points[\"e\"])\r\n\r\n # Here are the coordinates\r\n self.X0 = np.reshape(self.points[\"e\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y0 = np.reshape(self.points[\"n\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Z0 = np.reshape(self.points[\"z\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n\r\n self.X1 = np.reshape(self.points[\"e\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y1 = np.reshape(self.points[\"n\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Z1 = np.reshape(self.points[\"z\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n #\r\n # # visualize\r\n # if plot:\r\n # print(\"plotting\")\r\n # fig = plt.figure()\r\n # ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe(f2m*self.X0, f2m*self.Y0, f2m*self.Z0, rstride=1, cstride=1)\r\n # ax.plot_wireframe(f2m*self.X1, f2m*self.Y1, f2m*self.Z1, rstride=1, cstride=1)\r\n # plt.show()\r", "def gate_out(self, *dim_ranges):\n relevant_data = self.get_points(*[r.dim for r in dim_ranges])\n mins = np.array([r.min for r in dim_ranges])\n maxes = np.array([r.max for r in dim_ranges])\n test1 = np.any(relevant_data < mins, axis=1)\n test2 = np.any(relevant_data > maxes, axis=1)\n final = np.logical_or(test1, test2) \n return DataTable(self.data[final], self.dims, self.legends, self.tags.copy())", "def plot_xy(nc,params,tms,lev=None):\n \n import matplotlib.pyplot as plt\n import ggWRFutils as gW\n from datetime import datetime\n import numpy as np\n wvar={}\n for p in params:\n if p != 'Times':\n if p=='WS10':\n wvar[p]=np.sqrt(nc.variables['U10'][:]**2+nc.variables['U10'][:]**2)\n elif p=='UV10': \n wvar['U10']=nc.variables['U10'][:,:,:] \n wvar['V10']=nc.variables['V10'][:,:,:] \n elif p=='UV':\n wvar['U']=nc.variables['U'][:,lev,:,:] \n wvar['V']=nc.variables['V'][:,lev,:,:] \n elif len(nc.variables[p].shape) > 3:\n wvar[p]=nc.variables[p][:,lev,:,:] \n else: \n wvar[p]=nc.variables[p][:] \n Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc)\n for p in params:\n if params[p]=='pcolor':\n plt.pcolor(lon,lat,wvar[p][tms,:,:],shading='flat')\n plt.colorbar()\n if params[p]=='contourf':\n plt.contourf(lon,lat,wvar[p][tms,:,:],50)\n plt.colorbar()\n if params[p]=='contour':\n plt.contourf(lon,lat,wvar[p][tms,:,:])\n plt.colorbar()\n if params[p]=='quiver':\n if p=='UV10':\n 
plt.quiver(lon[::10,::10],lat[::10,::10],wvar['U10'][tms,::10,::10],wvar['V10'][tms,::10,::10],units='width')\n elif p=='UV':\n plt.quiver(lon,lat,wvar['U'][tms,:,:],wvar['V'][tms,:,:])\n plt.hold(True)\n plt.xlim(lon.min(),lon.max())\n plt.ylim(lat.min(),lat.max())\n fig=plt.gcf()\n return fig", "def make_grid(dataset):\n top_left_lat = dataset[\"a\"][0]\n top_left_lng = dataset[\"a\"][1]\n top_right_lng = dataset[\"c\"][1]\n bot_left_lat = dataset[\"b\"][0]\n\n lng_row = []\n lat_col = []\n i = top_left_lng\n while i < top_right_lng:\n lng_row.append(round(i, 5))\n i += step\n j = bot_left_lat\n while j < top_left_lat:\n lat_col.append(round(j, 5))\n j += step\n out_grid = []\n for i in lat_col:\n row = []\n for j in lng_row:\n row.append(\"{0}:{1}:0\".format(i, j))\n out_grid.append(row)\n return out_grid", "def render(self, image=False, **kwargs):\n import matplotlib.pyplot as plt\n\n source = self.alignment_transform.source.points\n target = self.alignment_transform.target.points\n # a factor by which the minimum and maximum x and y values of the warp\n # will be increased by.\n x_margin_factor, y_margin_factor = 0.5, 0.5\n # the number of x and y samples to take\n n_x, n_y = 50, 50\n # {x y}_{min max} is the actual bounds on either source or target\n # landmarks\n x_min, y_min = np.vstack([target.min(0), source.min(0)]).min(0)\n x_max, y_max = np.vstack([target.max(0), source.max(0)]).max(0)\n x_margin = x_margin_factor * (x_max - x_min)\n y_margin = y_margin_factor * (y_max - y_min)\n # {x y}_{min max}_m is the bound once it has been grown by the factor\n # of the spread in that dimension\n x_min_m = x_min - x_margin\n x_max_m = x_max + x_margin\n y_min_m = y_min - y_margin\n y_max_m = y_max + y_margin\n # build sample points for the selected region\n x = np.linspace(x_min_m, x_max_m, n_x)\n y = np.linspace(y_min_m, y_max_m, n_y)\n xx, yy = np.meshgrid(x, y)\n sample_points = np.concatenate(\n [xx.reshape([-1, 1]), yy.reshape([-1, 1])], axis=1\n )\n warped_points = self.alignment_transform.apply(sample_points)\n delta = warped_points - sample_points\n # plot the sample points result\n x, y, = (\n 0,\n 1,\n )\n if image:\n # if we are overlaying points onto an image,\n # we have to account for the fact that axis 0 is typically\n # called 'y' and axis 1 is typically called 'x'. Flip them here\n x, y = y, x\n plt.quiver(sample_points[:, x], sample_points[:, y], delta[:, x], delta[:, y])\n delta = target - source\n # plot how the landmarks move from source to target\n plt.quiver(\n source[:, x],\n source[:, y],\n delta[:, x],\n delta[:, y],\n angles=\"xy\",\n scale_units=\"xy\",\n scale=1,\n )\n # rescale to the bounds\n plt.xlim((x_min_m, x_max_m))\n plt.ylim((y_min_m, y_max_m))\n if image:\n # if we are overlaying points on an image, axis0 (the 'y' axis)\n # is flipped.\n plt.gca().invert_yaxis()\n return self", "def plot_figure3(df, left_colNames, right_colNames, dvmin=-100, dvmax=101, step=20, show=False, write_to=None):\n\t#select left values and right values\n\tleft = df[left_colNames]\n\tright = df[right_colNames]\n\t# 'diff' column: sum(left) - sum(right)\n\tlr_diff = pd.DataFrame({'diff': left.sum(axis=1)-right.sum(axis=1)})\n\t# 'side_chosen': 0 left, 1 right \n\t# 'choice': 1 chose left, 0 chose right. 
Flipped 'side_chosen' column, b/c we will count the frequency of choosing left\n\tlr_diff['choice'] = np.logical_xor(df['side_chosen'],1).astype(int)\n\t# sort diff from lowest to highest, and group into buckets\n\tlr_diff = lr_diff.sort_values(by=['diff'], ascending=True)\n\t#array of prob(choosing left) for each bucket\n\tgrouped = group(lr_diff, dvmin, dvmax, step)\n\n\t#plot data\n\ty = np.array(grouped)\n\tx = np.array([x for x in range(dvmin, dvmax, step)])\n\tif show:\n\t\tfig = go.Figure()\n\t\tfig.add_trace(go.Scatter(x=x, y=y, name=\"linear\", line_shape='linear'))\n\t\tfig.show()\n\tif write_to is not None:\n\t\tfig.write_image(write_to)\n\treturn x, y", "def calcBeamplot(PALC_config, gamma_n, plt_ranges, f, dire_meas_LSA, dire_meas_deg):\n # general\n N = PALC_config.N\n \n########################## SIMULATION SETUP ###############################\n # maximum of discrete mapping points:\n max_points = 10000\n # mesh\n x_range = plt_ranges.p_x\n y_range = plt_ranges.p_y\n pts_x = max_points / (y_range[1]-y_range[0])\n pts_y = max_points / (x_range[1]-x_range[0])\n\n x = np.linspace(x_range[0], x_range[1], num=int(pts_x+1))\n y = np.linspace(y_range[0], y_range[1], num=int(pts_y+1))\n\n X, Y = np.meshgrid(x,y)\n # get vertically array\n x_vert = np.reshape(X, np.size(X))\n y_vert = np.reshape(Y, np.size(Y))\n z_vert = np.array([0])\n\n # reference pressure\n p0 = 2 * 10**(-5)\n # considered frequency\n omega = 2 * np.pi * f\n # initialize driving fct. and output array\n D_opt_LSA = np.ones([N, 1])\n P_LSA = np.zeros([np.shape(x_vert)[0],1], dtype=complex) \n\n # air attenuation\n T = 293.15\n p = 101.325 * 10**(3)\n h = 50\n alpha, c = AirAbsorptionCoefficient(f, T, p, h)\n\n # directivity\n if PALC_config.directivity not in ['Measured Loudspeaker Data']:\n dire_meas_LSA = np.ones([1,1])\n dire_meas_deg = np.ones([1,1])\n \n ######################### SPL CALCULATION #################################\n x_start, y_start, x_stop, y_stop, x_c_n, y_c_n, x_S, y_S = source_pos(gamma_n, PALC_config)\n\n G_LSA_vert = CalcGreenFunctions(x_vert, y_vert, z_vert, x_c_n, y_c_n, 0.82,\\\n PALC_config.directivity, PALC_config.Lambda_y, \\\n gamma_n, c, omega, 1, dire_meas_LSA, \\\n dire_meas_deg, alpha, f, 0 )\n\n P_LSA[:,0] = G_LSA_vert @ D_opt_LSA[:,0]\n\n p_SPL = 20 * np.log10(np.abs(P_LSA) / p0)\n \n p_SPL = np.reshape(p_SPL, np.shape(X))\n\n return p_SPL, X, Y", "def SLTrace(self,NSL=12,Pts=[]):\n \n #Grid edge\n Bound_vert=[(0,0),(1,0),(1,1),(0,1),(0,0)]\n Bound_vert_phy=[]\n for i in range(len(Bound_vert)):\n Bound_vert_phy.append(self.Pts2Physic(Bound_vert[i]))\n \n #Streamline\n if(len(Pts)==0): #if the initial Pts are not provided\n Pts=PointOnUnitSquare(NSL,Endpoint=False)\n else:\n NSL=len(Pts)\n \n SL=[]\n SL_phy=[]\n TOF_phy=[]\n \n for i in range(len(Pts)):\n temp=self.Trace1SL(Pts[i])\n SL.append(temp[2])\n SL_phy.append(temp[3])\n TOF_phy.append(temp[5])\n \n #SL_phy=self.RotateSL(SL_phy)\n #SL_phy=self.TranslateSL(SL_phy)\n \n fig, axs = plt.subplots(ncols=2)\n \n ax=axs[0]\n ax.plot(*np.asarray(Bound_vert).T,lw=3,color='red')\n for i in range(len(Pts)):\n ax.plot(*np.asarray(SL[i]).T,lw=1,marker='o',markersize=1,color='blue')\n ax.set_ylim(bottom=0)\n ax.set_aspect('equal')\n ax.set_title(r'Transformed Space ($\\alpha,\\beta$)')\n \n ax=axs[1]\n ax.plot(*np.asarray(Bound_vert_phy).T,lw=3,color='red')\n for i in range(len(Pts)):\n ax.plot(*np.asarray(SL_phy[i]).T,lw=1,marker='o',markersize=1,color='blue')\n ax.set_ylim(bottom=0)\n ax.set_aspect('equal')\n 
ax.set_title(r'Physical Space ($x,y$)')\n\n fig.tight_layout()\n plt.show()\n return SL_phy,TOF_phy", "def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):\n E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), trace['E_z'].cpu()\n num_rows = len(data)\n num_cols = 2 + int((num_sweeps-1) / viz_interval)\n gs = gridspec.GridSpec(num_rows, num_cols)\n gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)\n fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))\n for row_ind in range(num_rows):\n ax = fig.add_subplot(gs[row_ind, 0])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset in the 1st column\n if row_ind == 0:\n ax.set_title('Data', fontsize=title_fontsize)\n# col_ind = 1\n for col_ind in range(num_cols-1):\n sweep = col_ind * viz_interval\n ax = fig.add_subplot(gs[row_ind, col_ind+1])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))\n if row_ind == 0:\n if sweep == 0:\n ax.set_title('RWS', fontsize=title_fontsize)\n else:\n ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)\n if save_name is not None:\n plt.savefig(save_name + '.svg', dpi=300)", "def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top')\n ax.set_title('Gauss-Legendre Quadrature Grid')\n ax.set_xlabel('longitude index')\n ax.set_ylabel('latitude index')\n fig.tight_layout(pad=0.5)\n return fig,ax", "def plantGrid(name, minimum, maximum, spacing):\n name = sys.argv[1]\n minimum = sys.argv[2]\n maximum = sys.argv[3]\n minimum = int(minimum)\n maximum = int(maximum)\n #convert to flux\n min1 = conversion(minimum)\n max1 = conversion(maximum)\n min1 = int(min1)\n max1 = int(max1)\n #brightness = [random.uniform(min1, max1) for _ in xrange(len(position))]\n #print brightness\n spacing = sys.argv[4]\n spacing = float(spacing)\n print len(data1)\n #create the position array for x values\n x = np.arange(6, len(data1), spacing)\n #create the position array for y values\n y = np.arange(6, len(data1), spacing)\n x2 = np.arange(6, len(data1), spacing)\n y2 = np.flipud(y)\n \n #combine both arrays to form a grid\n position = np.column_stack((x,y))\n position2 = np.column_stack((x2,y2))\n \n #combine both lines of grid to one array\n position = np.concatenate((position, position2), axis = 0)\n \n #create a random brightness array between the min and max values\n brightness = np.array([random.uniform(min1, max1) for _ in range(0,len(position))])\n \n #add to image file and subtract\n fakestars.addtofits(name, out_file, psf, position, brightness, coordsys, verbose)\n fakestars.addtofits(name, outfile2, psf, position, brightness, coordsys, verbose)\n imarith.imsubtract(out_file, outfile2, differenceFile, clobber=True)", "def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], 
map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d", "def get_gridded_parameters(q, xparam=\"time\", yparam=\"slist\", zparam=\"v\", round=False):\n plotParamDF = q[ [xparam, yparam, zparam] ]\n if round:\n plotParamDF[xparam] = np.array(plotParamDF[xparam]).astype(int)\n plotParamDF[yparam] = 
np.array(plotParamDF[yparam]).astype(int)\n plotParamDF = plotParamDF.groupby( [xparam, yparam] ).agg(np.nanmean).reset_index()\n plotParamDF = plotParamDF[ [xparam, yparam, zparam] ].pivot( xparam, yparam )\n x = plotParamDF.index.values\n y = plotParamDF.columns.levels[1].values\n X, Y = np.meshgrid( x, y )\n # Mask the nan values! pcolormesh can't handle them well!\n Z = np.ma.masked_where(\n np.isnan(plotParamDF[zparam].values),\n plotParamDF[zparam].values)\n return X,Y,Z", "def draw_laser_ranges():\n NUM_RANGES = len(D.ranges) # should be 360\n if False: #for easy commenting out...\n for angle in range(NUM_RANGES):\n print angle, \":\", D.ranges[angle] \n \n # helpful starting points, perhaps:\n # add line to the ranges image, \"D.image\"\n #cv.Line(D.image, (42,100), (100,42), cv.RGB(255, 0, 0), 1) # 1 == thickness\n # add dots to image being used to compute the Hough tr. \"D.hough\"\n # cv.Line(D.hough, (42,42), (42,42), 255, 2) # 1 == thickness\n for angle in range(NUM_RANGES):\n point = (CENTER + int(0.2*D.ranges[angle]*sin(radians(angle))), CENTER + int(0.2*D.ranges[angle]*cos(radians(angle))))\n cv.Line(D.image, (CENTER,CENTER), point, cv.RGB(255, 0 , 0), 1)\n cv.Line(D.hough, point, point, 255, 2) \n\n return", "def visualise_dataset_balancer_results(results, range=(-0.5, 0.5),\n colors=(\"#64B3DE\", \"#1f78b4\", \"#B9B914\", \"#FBAC44\", \"#bc1659\", \"#33a02c\", \"grey\", \"#b15928\", \"#6a3d9a\", \"#e31a1c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\"),\n exclude=(\"SVM (linear)\", \"Logistic regression\", \"Random forest\")):\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(results))\n sns.set(style='ticks')\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1)\n markers = [\"s\", \"d\", \"o\", \"^\", \"*\"]\n size = [150, 200, 200, 200, 250]\n hatches = [None, \"////\", \"..\"]\n\n # Move left y-axis and bottom x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position((\"axes\", 0.5))\n\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n # Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_axis_on()\n ax.spines['left'].set_color('black')\n ax.spines['bottom'].set_color('black')\n plt.xlabel(\"Change in TPR\")\n plt.ylabel(\"Change in TNR\")\n\n ax.xaxis.set_label_coords(0.1, 0.53)\n ax.yaxis.set_label_coords(0.53, 0.9)\n\n plt.ylim(range[0], range[1])\n plt.xlim(range[0], range[1])\n balancer_labels = ([], [])\n classifier_labels = ([], [])\n data_set_index = 0\n for (data_set, dataset_result) in results:\n\n none_true_pos_per_classifier = {}\n none_true_neg_per_classifier = {}\n\n for (classifier_description, result_arr) in dataset_result:\n for (balancer_description, results) in result_arr:\n if balancer_description == \"None\":\n none_true_pos_per_classifier[classifier_description] = results[3]\n none_true_neg_per_classifier[classifier_description] = results[4]\n break\n\n i = 0\n for (classifier_description, result_arr) in dataset_result:\n if classifier_description in exclude:\n continue\n balancer_index = 0\n for (balancer_description, results) in result_arr:\n if balancer_description != \"None\":\n if data_set_index == 0 and balancer_index == 0:\n 
classifier_labels[0].append(mpatches.Patch(color=colors[i], label=classifier_description, alpha=0.8))\n classifier_labels[1].append(classifier_description)\n ax.scatter(results[3] - none_true_pos_per_classifier[classifier_description], results[4] - none_true_neg_per_classifier[classifier_description],\n marker=markers[balancer_index % len(markers)], hatch=hatches[balancer_index % len(hatches)], s=size[balancer_index % len(markers)], alpha=0.8, color=colors[i],\n edgecolor=\"black\" if colors[i] != \"black\" else \"grey\", zorder=balancer_index % len(markers), lw=0.8)\n # Work around to get legend entries correct\n pt = ax.scatter(-99999999999, -9999999999, marker=markers[balancer_index % len(markers)], hatch=hatches[balancer_index % len(hatches)], s=200, alpha=0.8, color=\"white\",\n edgecolor=\"black\", zorder=data_set_index, lw=0.8)\n if i == 0:\n balancer_labels[0].append(pt)\n balancer_labels[1].append(balancer_description)\n balancer_index += 1\n i += 1\n data_set_index += 1\n legend = plt.legend(balancer_labels[0] + classifier_labels[0], balancer_labels[1] + classifier_labels[1], loc='lower center', bbox_to_anchor=(0.5, -0.2), fancybox=False, frameon=False, ncol=7)\n legend.get_frame().set_facecolor('#ffffff')\n\n sns.despine()\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/classifier_dataset_plt_{0}.png\".format(current_time), bbox_extra_artists=((legend,)), bbox_inches='tight')\n plt.close(fig)", "def final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset):\n\n # Generate x and y values for plotting\n ploty = np.linspace(0, undist.shape[0]-1, undist.shape[0])\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n # Create an image to draw the lines on\n #warp_zero = np.zeros_like(warped).astype(np.uint8)\n #color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n color_warp = np.zeros((720, 1280, 3), dtype='uint8') # NOTE: Hard-coded image dimensions\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, m_inv, (undist.shape[1], undist.shape[0]))\n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n # Annotate lane curvature values and vehicle offset from center\n avg_curve = (left_curve + right_curve)/2\n label_str = 'Radius of curvature: %.1f m' % avg_curve\n result = cv2.putText(result, label_str, (30,40), 0, 1, (0,0,0), 2, cv2.LINE_AA)\n\n label_str = 'Vehicle offset from lane center: %.1f m' % vehicle_offset\n result = cv2.putText(result, label_str, (30,70), 0, 1, (0,0,0), 2, cv2.LINE_AA)\n\n plt.imshow(result)\n plt.show()\n\n return result", "def plot_age_deriv(self, period=6., vec_s=0.05, sparse=10, lon=232., lat=46., streamline=False, projection='lambert',geopolygons=None, showfig=True, vmin=None, vmax=None, sta=True, hillshade=False):\n\t\tif hillshade:\n\t\t\talpha = 0.5\n\t\telse:\n\t\t\talpha = 1.\n\t\tgroup = self['%g_sec'%( period )]\n\t\tderiv_lat_Arr = 
group['age_deriv_lat_Arr'].value\n\t\tderiv_lon_Arr = group['age_deriv_lon_Arr'].value\n\t\tderiv_msk_Arr = group['age_deriv_msk_Arr'].value\n\t\tage_Arr = group['age_Arr'].value\n\t\tage_Arr_msk = group['age_Arr_msk'].value\n\t\tmask = np.logical_or(deriv_msk_Arr, age_Arr_msk)\n\t\tm = self._get_basemap(projection=projection, geopolygons=geopolygons,hillshade=hillshade)\n\t\tx, y = m(group['lonArr'].value, group['latArr'].value)\n\t\tmy_cmap = pycpt.load.gmtColormap('./cv.cpt')\n\t\tif vmin == None:\n\t\t\tvmin = np.nanmin(age_Arr[~age_Arr_msk])\n\t\t\tvmin = np.ceil(vmin)\n\t\tif vmax == None:\n\t\t\tvmax = np.nanmax(age_Arr[~age_Arr_msk])\n\t\t\tvmax = np.floor(vmax)\n\t\tim = m.pcolormesh(x, y, np.ma.masked_array(age_Arr,mask=age_Arr_msk), cmap=my_cmap, shading='gouraud', vmin=vmin, vmax=vmax, alpha=alpha)\n\t\tcb = m.colorbar(im, \"bottom\", size=\"3%\", pad='2%', format='%d')\n\t\tcb.set_label(\"Age (myr)\", fontsize=12, rotation=0)\n\t\tcb.set_alpha(1)\n\t\tcb.draw_all()\n\t\tax = plt.gca()\n\t\tif streamline:\n\t\t\trEarth = 6370997.0 # Earth radius\n\t\t\tseed = np.array(m(lon, lat))\n\t\t\tdx = deriv_lon_Arr * rEarth * np.cos(group['latArr'].value * np.pi / 180)\n\t\t\tdy = deriv_lat_Arr * rEarth\n\t\t\tstrm = ax.streamplot(x[0,:], y[:,0], dx, dy, color='gray', linewidth=1, start_points=seed.reshape(1,2))\n\t\t\tax.plot(seed[0], seed[1], marker='o',color='gray')\n\t\telse:\n\t\t\tllons = group['lonArr'].value[~mask]\n\t\t\tllats = group['latArr'].value[~mask]\n\t\t\tdlon = deriv_lon_Arr[~mask]\n\t\t\tdlat = deriv_lat_Arr[~mask]\n\t\t\tdabs = np.sqrt(dlon[::sparse]**2 + dlat[::sparse]**2)\n\t\t\tm.quiver(llons[::sparse], llats[::sparse], dlon[::sparse]/dabs, dlat[::sparse]/dabs, latlon=True, pivot='tail',scale=1./vec_s)\n\t\tif sta:\n\t\t\tself.sta_on_plot(ax,m,period)\n\t\tfig = plt.gcf()\n\t\tfig.suptitle(str(period)+' sec', fontsize=14,y=0.95)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\telse:\n\t\t\tplt.close('all')\n\t\tif streamline:\n\t\t\treturn strm\n\t\tpass", "def CreateTargetGeoField(nbtimestep,latlen,lonlen):\n\n pres_grid = np.zeros((nbtimestep, latlen, lonlen))\n u_grid = np.zeros((nbtimestep, latlen, lonlen))\n v_grid = np.zeros((nbtimestep, latlen, lonlen))\n\n return pres_grid,u_grid,v_grid", "def visualise_cppn(self, resolution=(64, 64)):\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import imshow\n data = np.empty([resolution[0], resolution[1]])\n x_linspace = np.linspace(-1, 1, resolution[0])\n y_linspace = np.linspace(-1, 1, resolution[1])\n for row, x in enumerate(x_linspace):\n for col, y in enumerate(y_linspace):\n data[row, col] = self.graph(np.array([x, y, 0, 0], dtype=np.float32))[0]\n #plt.axis([-1, 1, -1, 1])\n print(data.min(), \" \", data.max())\n imshow(data, cmap='Greys', vmin=-1, vmax=1)\n plt.show()", "def plot_Q_function(self):\r\n input_state = np.zeros([1, self.feature_number])\r\n input_action = np.zeros([1, self.action_space])\r\n actions = np.linspace(-3., 3., 50)\r\n v_ego = np.linspace(0., 30., 50)\r\n if self.feature_number == 1:\r\n Q_map = np.zeros((len(v_ego), len(actions)))\r\n for v in range(len(v_ego)):\r\n for a in range(len(actions)):\r\n input_state[0, 0] = self.v_set - v_ego[v]\r\n input_state = input_state.astype(float)\r\n input_action[0, 0] = actions[a]\r\n Q_map[v, a] = self.critic.predict([input_state, input_action])\r\n elif self.feature_number == 2:\r\n \"\"\"TODO: Adjust to DDPG critic layout\"\"\"\r\n Q_map = np.zeros((500, 20, self.action_space))\r\n for distance in range(500):\r\n for delta_v in range(-10, 
10):\r\n input[0, 0] = distance\r\n input[0, 1] = delta_v\r\n Q_map[distance, delta_v, :] = self.critic.predict(input)\r\n elif self.feature_number == 3:\r\n \"\"\"TODO: Implementation\"\"\"\r\n return Q_map", "def plot_age_traj(self, period=6., lon=232., lat=46., N=30, streamline=False, vec=False, projection='lambert',geopolygons=None, showfig=True, vmin=None, vmax=None, sta=True, hillshade=False):\n\t\tif hillshade:\n\t\t\talpha = 0.5\n\t\telse:\n\t\t\talpha = 1.\n\t\tgroup = self['%g_sec'%( period )]\n\t\tage_Arr = group['age_Arr'].value\n\t\tage_Arr_msk = group['age_Arr_msk'].value\n\t\tm = self._get_basemap(projection=projection, geopolygons=geopolygons,hillshade=hillshade)\n\t\tif streamline:\n\t\t\tllons, llats = self._cons_traj_stream(period=period,lon=lon,lat=lat,N=N,projection=projection)\n\t\t\tm = self._get_basemap(projection=projection, geopolygons=geopolygons,hillshade=hillshade)\n\t\t\tx_traj, y_traj = m(llons,llats)\n\t\telse:\n\t\t\tmask = self._cons_traj(lon=lon,lat=lat,period=period)\n\t\t\tllons = group['lonArr'].value[~mask]\n\t\t\tllats = group['latArr'].value[~mask]\n\t\t\tif vec:\n\t\t\t\tderiv_lat_Arr = group['age_deriv_lat_Arr'].value\n\t\t\t\tderiv_lon_Arr = group['age_deriv_lon_Arr'].value\n\t\t\t\tdlon = deriv_lon_Arr[~mask]\n\t\t\t\tdlat = deriv_lat_Arr[~mask]\n\t\t\t\tdabs = np.sqrt(dlon**2 + dlat**2)\n\t\t\t\tm.quiver(llons, llats, dlon/dabs, dlat/dabs, latlon=True, pivot='tail')\n\t\t\tx_traj, y_traj = m(llons, llats)\n\t\t\tind = np.argsort(x_traj)\n\t\t\tx_traj = x_traj[ind]\n\t\t\ty_traj = y_traj[ind]\n\t\tx, y = m(group['lonArr'].value, group['latArr'].value)\n\t\tmy_cmap = pycpt.load.gmtColormap('./cv.cpt')\n\t\tif vmin == None:\n\t\t\tvmin = np.nanmin(age_Arr[~age_Arr_msk])\n\t\t\tvmin = np.ceil(vmin)\n\t\tif vmax == None:\n\t\t\tvmax = np.nanmax(age_Arr[~age_Arr_msk])\n\t\t\tvmax = np.floor(vmax)\n\t\tim = m.pcolormesh(x, y, np.ma.masked_array(age_Arr,mask=age_Arr_msk), cmap=my_cmap, shading='gouraud', vmin=vmin, vmax=vmax, alpha=alpha)\n\t\tcb = m.colorbar(im, \"bottom\", size=\"3%\", pad='2%', format='%d')\n\t\tcb.set_label(\"Age (myr)\", fontsize=12, rotation=0)\n\t\tcb.set_alpha(1)\n\t\tcb.draw_all()\n\t\tax = plt.gca()\n\t\tax.plot(x_traj, y_traj,color='gray')\n\t\tif sta:\n\t\t\tself.sta_on_plot(ax,m,period)\n\t\tfig = plt.gcf()\n\t\tfig.suptitle(str(period)+' sec', fontsize=14,y=0.95)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\treturn", "def input_space_plt(model, plot_range=(-2., 2.), num_steps=201, save_fig=''):\n # Grid the input space\n grid = torch.zeros((num_steps * num_steps, 2))\n idx = 0\n for x1 in np.linspace(plot_range[0], plot_range[1], num_steps):\n for x2 in np.linspace(plot_range[0], plot_range[1], num_steps):\n grid[idx, :] = torch.Tensor([x1, x2])\n idx += 1\n\n # Calculate values predicted by model on grid\n predictions = model(grid)\n pred_grid = predictions.view(num_steps, num_steps).detach()\n\n # Set up a custom color map where -1 is mapped to blue and 1 to red\n colors = [(1, 1, 1), (0, 0, 1), (0.5, 0, 0.5), (1, 0, 0), (1, 1, 1)]\n colormap = LinearSegmentedColormap.from_list('cmap_red_blue', colors, N=300)\n\n # Plot input space as a heatmap\n plt.imshow(pred_grid, vmin=-2., vmax=2., cmap=colormap, alpha=0.75)\n plt.colorbar()\n plt.tick_params(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False,\n labelleft=False)\n\n if len(save_fig):\n plt.savefig(save_fig, format='png', dpi=400, bbox_inches='tight')\n plt.clf()\n plt.close()", "def get_graph(self, points):\n\n gridmap = 
cv2.imread(self.ruta_imagen, -1)\n\n gridmap = self.four_point_transform(gridmap, points)\n\n gridmap[(gridmap >= 179) & (gridmap <= 238)] = 0\n gridmap[(gridmap >= 241) & (gridmap <= 255)] = 255\n\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n gridmap_dilatated = cv2.dilate(cv2.bitwise_not(gridmap), kernel, iterations=1)\n gridmap_dilatated = cv2.bitwise_not(gridmap_dilatated)\n\n scale_percent = 25 # percent of original size\n width = int(gridmap_dilatated.shape[1] * scale_percent / 100)\n height = int(gridmap_dilatated.shape[0] * scale_percent / 100)\n dim = (width, height)\n gridmap_resized = cv2.resize(gridmap_dilatated, dim, interpolation=cv2.INTER_NEAREST)\n\n self.gridmap2graph(gridmap_resized, width, height)\n\n return gridmap_resized, width, height", "def run(self): \n\n # Dictionaries whose keys are labels of the points in a 2-D grid and values\n # are an instance of the class meshPoint holding the informaiton about \n # that mesh point\n self.boundaryPoints = {}\n self.internalPoints = {}\n\n # Rectangle \n if self.layoutType.lower() == 'rectangle': \n # Define the mesh for a rectanglular layout\n self.defineRectangleLayout()\n # Circle \n elif self.layoutType.lower() == 'circle':\n # Define the mesh for a circular layout\n self.defineCircleLayout()\n\n return [self.internalPoints,self.boundaryPoints]", "def slice_explorer(data, cmap='gray'):\n data_len = len(data)\n\n @interact(plane=(0, data_len-1), continuous_update=False)\n def display_slice(plane=data_len/2):\n fig, axis = plt.subplots(figsize=(20, 7))\n axis_3d = fig.add_subplot(133, projection='3d')\n show_plane(axis, data[plane], title='Plane {}'.format(plane), cmap=cmap)\n slice_in_3d(axis=axis_3d, shape=data.shape, plane=plane)\n plt.show()\n\n return display_slice", "def velocity_map(self, output='test'):\n self.figure = figure(figsize=(10,3))\n self.axes = self.figure.gca() \n xWindowLim = (self.analyst.windowSize[0], self.analyst.windowSize[1])\n yWindowLim = (self.analyst.windowSize[2], self.analyst.windowSize[3])\n \n # Generate contours for velocity magnitude \n xGrid = linspace(\\\n xWindowLim[0]*self.millimetersPerPixel, \n xWindowLim[1]*self.millimetersPerPixel, self.nbins)\n yGrid = linspace(\\\n yWindowLim[0]*self.millimetersPerPixel, \n yWindowLim[1]*self.millimetersPerPixel, self.nbins)\n magVelGrid = griddata(self.xs, self.ys, self.magVel, xGrid, yGrid) \n # csf = self.axes.contourf(xGrid, yGrid, magVelGrid, range(2,26,2), cmap=myColorMap)\n csf = self.axes.contourf(xGrid, yGrid, magVelGrid, cmap=myColorMap)\n cbar = self.figure.colorbar(csf) \n cbar.set_label(\"Velocity magnitude, px/s\")\n \n # Generate arrow plot\n # q = self.axes.quiver(self.xs, self.ys, self.us, self.vs,\n # angles = 'xy', scale_units='xy', scale=2, pivot = 'mid')\n # self.axes.quiverkey(q, 0.9, 1.0, 10, \"10 px/frame\", coordinates='axes') \n \n # Save figure \n self.axes.set_aspect('equal')\n self.axes.set_xlim(*xWindowLim)\n self.axes.set_ylim(*yWindowLim)\n self.figure.savefig(output + '_velocity_map.pdf')", "def GLDAS025LandGrid():\n return GLDAS025Grids(only_land=True)", "def visualize(self):\n self.octree.updateInnerOccupancy()\n print(\"Start Octomap Visualization\")\n\n # define parameters\n data = imgviz.data.arc2017()\n camera_info = data['camera_info']\n K = np.array(camera_info['K']).reshape(3, 3)\n width=camera_info['width']\n height=camera_info['height']\n\n # get free and occupied grid\n occupied, _ = self.octree.extractPointCloud()\n #frontier = self.gen_frontier()\n \n print(\"load point 
cloud\")\n window = pyglet.window.Window(\n width=int(1280), height=int(960)\n )\n\n @window.event\n def on_key_press(symbol, modifiers):\n if modifiers == 0:\n if symbol == pyglet.window.key.Q:\n window.on_close()\n\n gui = glooey.Gui(window)\n hbox = glooey.HBox()\n hbox.set_padding(5)\n\n camera = trimesh.scene.Camera(\n resolution=(width, height), focal=(K[0, 0], K[1, 1])\n )\n\n # initial camera pose\n camera_transform = np.array(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, -5],\n [0.0, 0.0, 0.0, 1.0],\n ],\n )\n\n \n\n occupied_geom = trimesh.voxel.ops.multibox(\n occupied, pitch=self.resolution, colors=[0.0, 0.0, 0.0, 0.5]\n )\n\n # frontier_geom = trimesh.voxel.ops.multibox(\n # frontier, pitch=self.resolution, colors=[1.0, 0, 0, 0.5]\n # )\n scene = trimesh.Scene(camera=camera, geometry=[occupied_geom])#, frontier_geom])\n scene.camera_transform = camera_transform\n hbox.add(self.labeled_scene_widget(scene, label='octomap'))\n\n\n gui.add(hbox)\n pyglet.app.run()", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def get_displacements(ds):\n # Se obtienen una matriz de datos con los desplazamientos promedios de cada imagen\n t = ds['t']\n t = t[:n_im-1]\n t = mplt.dates.date2num(t)\n d = ds['d_t']\n \n # Se grafica la curva Desplazamientos promedios vs Tiempo\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d)):\n # Hallando el valor promedio final x zona\n mean_bp = d[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)+'.png'\n\n fig, ax= 
plt.subplots(figsize=(10,7))\n ax.plot_date(t,d[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000/(4*fc*5),c*1000/(4*fc*5)]) # En (mm)\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Displacement_BP/\"+direction,orientation='landscape')\n \n return 'Ok'", "def setup(self):\n igd = self.options['input_grid_data']\n ogd = self.options['output_grid_data']\n output_subset = self.options['output_subset']\n\n if ogd is None:\n ogd = igd\n\n # Build the interpolation matrix which maps from the input grid to the output grid.\n # Rather than a single phase-wide interpolating polynomial, map each segment.\n # To do this, find the nodes in the output grid which fall in each segment of the input\n # grid. Then build a Lagrange interpolating polynomial for that segment\n L_blocks = []\n output_nodes_ptau = list(ogd.node_ptau[ogd.subset_node_indices[output_subset]])\n\n for iseg in range(igd.num_segments):\n i1, i2 = igd.segment_indices[iseg]\n iptau_segi = np.take(igd.node_ptau, (i1, i2-1))\n istau_segi = np.take(igd.node_stau, (i1, i2-1))\n\n # The indices of the output grid that fall within this segment of the input grid\n if ogd is igd:\n optau_segi = iptau_segi\n else:\n ptau_hi = igd.segment_ends[iseg+1]\n if iseg < igd.num_segments - 1:\n idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]\n else:\n idxs_in_iseg = np.arange(len(output_nodes_ptau))\n optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]\n # Remove the captured nodes so we don't accidentally include them again\n output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]\n\n # Now get the output nodes which fall in iseg in iseg's segment tau space.\n ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1\n\n # Create the interpolation matrix and add it to the blocks\n L, _ = lagrange_matrices(istau_segi, ostau_segi)\n L_blocks.append(L)\n\n self.interpolation_matrix = block_diag(*L_blocks)\n r, c = np.nonzero(self.interpolation_matrix)\n\n output_num_nodes, input_num_nodes = self.interpolation_matrix.shape\n\n for (name, kwargs) in self._timeseries_outputs:\n\n input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n input_name = 'input_values:{0}'.format(name)\n self.add_input(input_name,\n shape=(input_num_nodes,) + kwargs['shape'],\n **input_kwargs)\n\n output_name = name\n output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']\n self.add_output(output_name, **output_kwargs)\n\n self._vars.append((input_name, output_name, kwargs['shape']))\n\n size = np.prod(kwargs['shape'])\n val_jac = np.zeros((output_num_nodes, size, input_num_nodes, size))\n\n for i in range(size):\n val_jac[:, i, :, i] = self.interpolation_matrix\n\n val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),\n order='C')\n\n val_jac_rows, val_jac_cols = np.where(val_jac != 0)\n\n rs, cs = val_jac_rows, val_jac_cols\n self.declare_partials(of=output_name,\n wrt=input_name,\n rows=rs, cols=cs, val=val_jac[rs, cs])", "def vis_difference(self):\n print(self.init_vec)\n\n init = self.init_output.numpy()\n\n alphas = np.linspace(0, 1, 20)\n for i, alpha in enumerate(alphas):\n\n display.clear_output(wait=True)\n norm = 
[torch.linalg.norm(torch.tensor(\n self.init_vec + alpha*self.eigen[i]), axis=1).detach().numpy() for i in range(2)]\n\n diff = np.array([self.compute_difference(\n alpha, self.eigen[i]) for i in range(2)])\n\n fig = plt.figure(figsize=(14, 12), tight_layout=True)\n fig.suptitle(\"Latent direction variation\", fontsize=20)\n gs = gridspec.GridSpec(2, 2)\n\n ax_temp = plt.subplot(gs[0, :])\n ax_temp.scatter(\n init[:, 0], init[:, 1])\n ax_temp.set_title(\"Initial Dataset\")\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n for j in range(2):\n ax_temp = plt.subplot(gs[1, j])\n sc = ax_temp.quiver(\n init[:, 0], init[:, 1], diff[j, :, 0], diff[j, :, 1], norm[j])\n sc.set_clim(np.min(norm[j]), np.max(norm[j]))\n plt.colorbar(sc)\n ax_temp.set_title(\n \"Direction: {}, alpha: {}\".format(j+1, alpha))\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n plt.savefig(\"frames_dir/fig_{}\".format(i))\n plt.show()", "def vis_latent_space_abs(self):\n gen_input = self.init_vec.detach().numpy()\n\n grid_x, grid_y = np.mgrid[-2:2:200j, -2:2:200j]\n\n gen_input = torch.tensor(\n np.stack([grid_x.reshape(-1,), grid_y.reshape(-1,)], axis=1), requires_grad=False).float()\n gen_output = self.generator(gen_input).detach().numpy()\n\n grid_color = griddata(gen_input, np.abs(gen_output[:, 1])-np.abs(gen_output[:, 0]),\n (grid_x, grid_y), method='cubic')\n\n fig, ax = plt.subplots(figsize=(14, 12))\n col = ax.imshow(grid_color.T, extent=(-2, 2, -2, 2))\n plt.colorbar(col, label='|Y| - |X|')\n plt.title(\"Latent Space coloring (Cross)\")", "def visualise():\n\n column = request.form.getlist('columnName')\n regions = request.form.getlist('raw_regions')\n #take the single string and return a list\n regions = query_proc.prep_regions(regions)\n #get that tables of interst\n table = query_proc.column_to_table(column)\n\n var_data = query_proc.get_region_data(table, column, regions)\n minval = query_proc.get_region_data_min(table, column, regions)\n maxval = query_proc.get_region_data_max(table, column, regions)\n\n #column diction to get human fiendly designation\n column_dict = name_column.get_name_column_dict()\n real_column = column_dict[column[0]]\n\n\n ##packing for the template\n region = regions[0]\n min_max = [minval, maxval]\n step = query_proc.calc_steps(min_max)\n min_max.append(step)\n\n min_max = json.dumps(min_max)\n json_vardata = json.dumps(var_data)\n\n return render_template('visualise.html',\n title='Data on a Map!',\n column=column,\n real_column=real_column,\n region=region,\n min_max=min_max,\n json_vardata=json_vardata)", "def show(self, **kwargs):\n show_gmm_points([(pt, i) for i, pts in enumerate(self.points)\\\n for pt in pts], self.params, **kwargs)", "def __init__(self, constraints=[], infeasiblePoints=[], feasiblePoints=[], optimalPoint=None, costVector=None, zoom=1.0, frameTime=0.0): \n super(PacmanPlotLP, self).__init__(zoom, frameTime)\n\n xmin = 100000\n ymin = 100000\n xmax = -100000\n ymax = -100000\n\n for point in feasiblePoints:\n if point[0] < xmin:\n xmin = point[0]\n if point[0] > xmax:\n xmax = point[0]\n if point[1] < ymin:\n ymin = point[1]\n if point[1] > ymax:\n ymax = point[1]\n\n if len(feasiblePoints) == 0:\n for point in infeasiblePoints:\n if point[0] < xmin:\n xmin = point[0]\n if point[0] > xmax:\n xmax = point[0]\n if point[1] < ymin:\n ymin = point[1]\n if point[1] > ymax:\n ymax = point[1]\n\n xmin = int(math.floor(xmin)) - 3\n ymin = 
int(math.floor(ymin)) - 3\n xmax = int(math.ceil(xmax)) + 3\n ymax = int(math.ceil(ymax)) + 3\n width = xmax-xmin+1\n height = ymax-ymin+1\n\n# p = feasiblePoints[2]\n# print(\"p={}\".format(p))\n# print(\"feasible={}\".format(self.pointFeasible(p, constraints)))\n# g = self.cartesianToLayout(xmin, ymin, xmax, ymax, p)\n# print(\"g={}\".format(g))\n# gr = (int(round(g[0])), int(round(g[1])))\n# p2 = self.layoutToCartesian(xmin, ymin, xmax, ymax, gr)\n# print(\"p2={}\".format(p2))\n# print(\"p2 feasible={}\".format(self.pointFeasible(p2, constraints)))\n\n layoutLists = self.blankLayoutLists(width, height)\n\n self.addInfeasibleGhosts(layoutLists, constraints, xmin, ymin, xmax, ymax)\n\n layoutLists = self.changeBorderGhostsToWall(layoutLists)\n \n for point in infeasiblePoints:\n self.addCartesianPointToLayout(layoutLists, point, '.', xmin, ymin, xmax, ymax)\n\n for point in feasiblePoints:\n self.addCartesianPointToLayout(layoutLists, point, 'o', xmin, ymin, xmax, ymax)\n\n if optimalPoint is not None:\n self.addCartesianPointToLayout(layoutLists, optimalPoint, 'P', xmin, ymin, xmax, ymax)\n\n if graphicsUtils._canvas is not None:\n graphicsUtils.clear_screen()\n \n # Initialize GameStateData with blank board with axes \n self.width = width\n self.height = height\n\n self.zoom = min(30.0/self.width, 20.0/self.height)\n self.gridSize = graphicsDisplay.DEFAULT_GRID_SIZE * self.zoom\n\n maxNumGhosts = 10000\n layout = Layout(layoutLists)\n self.blankGameState = GameStateData()\n self.blankGameState.initialize(layout, maxNumGhosts)\n self.initialize(self.blankGameState)\n title = 'Pacman Plot LP'\n graphicsUtils.changeText(self.infoPane.scoreText, title)\n graphicsUtils.refresh()\n graphicsUtils.sleep(1)\n\n if costVector is not None:\n self.shadeCost(layoutLists, constraints, costVector, feasiblePoints, xmin, ymin, xmax, ymax)", "def getLocalMap(dist_compl):\n sdc=dist_compl*RES\n #clms are real ;)\n #rws are imaginary :D #rows\n map_padd = 1*RES #add a meter\n rws_ofs = abs(sdc.imag.min())+map_padd #offsetX\n rws = abs(sdc.imag.max())+(rws_ofs)\n clms_ofs = abs(sdc.real.min())+map_padd\n clms = abs(sdc.real.max())+(clms_ofs)\n M = np.zeros((np.round(rws+map_padd).astype(int),np.round(clms+map_padd).astype(int))).astype(dtype=MAP_D_TYPE)#empty local map\n Mg = M.copy()\n points = sdc + np.array([clms_ofs+1j*rws_ofs]) #scale\n #M[points.imag.astype(int),points.real.astype(int)]=10 \n for p in points:\n r=np.round(p.imag).astype(int)\n c=np.round(p.real).astype(int)\n try:\n #draw line in matrix\n lc = [np.round(rws_ofs).astype(int),np.round(clms_ofs).astype(int),r,c]\n rr, cc, val = line_aa(*lc) #not really demaning --> 1%\n M[rr, cc] = np.logical_or(M[rr,cc]>0, val>0) \n #add gaussian\n Mg[r-GPoints//2:r+GPoints//2,c-GPoints//2:c+GPoints//2]+=Gau\n except:\n print('Error: out of array when calculating the local map',r,c)\n Mg[Mg>100]=100 #cap the gaussian matrix\n car_pos_in_loc_mat = np.array([np.round(clms_ofs).astype(int), np.round(rws_ofs).astype(int)])\n #Mg[car_pos_in_loc_mat[1],car_pos_in_loc_mat[0]]=300 #add car pos\n return M*(-100)+Mg, car_pos_in_loc_mat", "def show_dprime(sim_attr_generator):\n#TODO description\n dprime_fnc_list = [\n (sim_attr.id_name,sim_attr.dprime_fnc) for sim_attr in sim_attr_generator\n ]\n\n if Args.mat_file_out != None:\n save_dict = dict()\n else:\n x_axis = int(math.ceil(math.sqrt(len(dprime_fnc_list))))\n y_axis = int(math.ceil(float(len(dprime_fnc_list)) / x_axis))\n fig, axes = plt.subplots(nrows=y_axis,ncols=x_axis)\n\n#? 
Code duplication\n if len(dprime_fnc_list) == 1:\n id_name, dprime_fnc = dprime_fnc_list[0]\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n im = show_plot_imshow_from_mesh(\n axes, mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n fig.colorbar(im,shrink=0.8)\n plt.show()\n# End code duplication\n return\n\n for i, (id_name, dprime_fnc) in enumerate(dprime_fnc_list):\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n if Args.mat_file_out != None:\n dprime_fnc[id_name] = {'X':mesh_X, 'Y':mesh_Y, 'Z':mesh_Z}\n else:\n im = show_plot_imshow_from_mesh(\n axes.flat[i], mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n if Args.mat_file_out != None:\n scipy.io.savemat(Args.mat_file_out, save_dict)\n else:\n fig.colorbar(im,ax=axes.ravel().tolist(),shrink=0.8)\n plt.show()", "def _plot_base_matrix(self, hv_point_data):\n matrix_plot = self.hv.Segments(hv_point_data.data,\n kdims=['time', 'channel', 'endtime', 'channel']\n ).opts(tools=['hover'],\n aspect=4,\n responsive='width',\n )\n return matrix_plot", "def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized 
Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''", "def __init__(self, Rect0=(0,0),Rect1=(1,1),Qw=1000,Qe=(250,250,250,250),h=10,phi=0.2):\n #Cornor Points\n self.Pts=[(Rect0[0],Rect0[1]),(Rect0[0],Rect1[1]),(Rect1[0],Rect1[1]),(Rect1[0],Rect0[1])]\n \n #Center Well\n self.Pts.append(((Rect0[0]+Rect1[0])/2,(Rect0[1]+Rect1[1])/2) )\n \n self.Qw=Qw\n self.Qe=Qe\n self.Qe_int=np.zeros(4)\n self.h=h\n self.phi=phi\n self.theta=[np.pi/2,np.pi/2,np.pi/2,np.pi/2]\n \n #Subgrid\n self.SubGrids=[]\n self.NeighborID=[(1,3),(2,0),(3,1),(0,2)]\n \n #Streamline\n self.SL=[]\n self.TOF=[]", "def plotFilterOutputs(laplace_in=None, time_domain_fn=None, \n lp_range=(0,1e-3), hp_range=(0,1e-3), points=1e3,\n input_name=\"Input\"):\n \n t_lp = linspace(*lp_range,points)\n t_hp = linspace(*hp_range,points)\n\n \n if laplace_in != None:\n \n A,b,V_lowpass = lowpass(Vi=laplace_in)\n t_lp,y_lp = inverseLaplace(V_lowpass[-1],t=t_lp)\n \n \n A,b,V_highpass = highpass(Vi=laplace_in)\n t_hp,y_hp = inverseLaplace(V_highpass[-1],t=t_hp)\n \n elif time_domain_fn != None:\n \n A,b,V_lowpass = lowpass()\n lowsys = symToTransferFn(V_lowpass[-1])\n t_lp,y_lp,svec = sp.lsim(lowsys, time_domain_fn(t_lp), t_lp)\n \n \n A,b,V_highpass = highpass()\n highsys = symToTransferFn(V_highpass[-1])\n t_hp,y_hp,svec = sp.lsim(highsys, time_domain_fn(t_hp), t_hp)\n \n else:\n print(\"No input given.\")\n \n \n fig,axes = plt.subplots(1,2,figsize=(18,6))\n ax1,ax2 = axes[0],axes[1]\n \n # low pass response plot\n ax1.set_ylabel('$V_o$')\n ax1.set_xlabel('$t$')\n ax1.plot(t_lp,y_lp)\n ax1.grid()\n ax1.set_title(\"Response of low pass filter to {}\".format(input_name))\n \n # high pass response plot\n ax2.set_ylabel('$V_o$')\n ax2.set_xlabel('$t$')\n ax2.plot(t_hp,y_hp)\n ax2.grid()\n ax2.set_title(\"Response of high pass filter to {}\".format(input_name))\n \n plt.show()\n return t_lp,y_lp, t_hp,y_hp", "def shifts_projection(sc, clean):\n def shifts_projected(clean, axis):\n projected = clean.map(lambda x: x.mean(axis=axis)[:, :, np.newaxis])\n target = getTarget(projected, 30, 1)\n shifts = registerByPlane(sc, projected, target[:, :, np.newaxis], 10, False)\n return shifts[:, :, 0]\n\n # shifts_xy = shifts_projected(clean, 2)\n shifts_xz = shifts_projected(clean, 1)\n shifts_yz = shifts_projected(clean, 0)\n\n # x_shifts = np.mean(np.stack((shifts_xz[:, 0], shifts_xy[:, 0])), axis=0)\n z_shifts = np.mean(np.stack((shifts_xz[:, 1], shifts_yz[:, 1])), axis=0)\n # y_shifts = np.mean(np.stack((shifts_yz[:, 0], shifts_xy[:, 1])), axis=0)\n plt.figure()\n plt.plot(shifts_xz[:, 1])\n plt.plot(shifts_yz[:, 1])\n plt.plot(z_shifts)\n plt.title('Z')\n # plt.figure()\n # plt.plot(shifts_xz[:, 0])\n # plt.plot(shifts_xy[:, 0])\n # plt.plot(x_shifts)\n # plt.title('X')\n # plt.figure()\n # plt.plot(shifts_yz[:, 0])\n # plt.plot(shifts_xy[:, 1])\n # 
plt.plot(y_shifts)\n # plt.title('Y')\n # shifts_all = np.stack((x_shifts, y_shifts, z_shifts))\n\n def initReg(kv):\n from scipy.ndimage.interpolation import shift\n index, volume = kv\n current_shift = (0, 0, -1 * z_shifts[int(index[0])])\n shifted = shift(volume, current_shift)\n return shifted.astype(np.int16)\n\n reg = clean.map(initReg, with_keys=True, value_shape=clean.shape[1:], dtype=np.int16)\n reg.cache()\n reg.count()\n return reg", "def CreateLandmask(Fieldset, test = False):\n \n \n \"\"\"\n This first set of lines creates a numpy array with u velocities and a numpy\n array with v velocities. First we get the U and V fields from the dataset. Then\n we compute a time chunk, which is needed because of the dataset. Then we only\n take the first slice of the U and V field (we do not need more for finding the land\n and ocean grids). As last we make an empty array which will be filled with zeros and \n ones.\n \"\"\"\n fU = Fieldset.U\n fV = Fieldset.V\n Fieldset.computeTimeChunk(fU.grid.time[0], 1) \n uvel_mask_c = fU.data[0,:,:] \n vvel_mask_c = fV.data[0,:,:]\n# vvel_mask_c = np.roll(vvel_mask_c, 1, axis = 0)\n landmask = np.zeros((uvel_mask_c.shape[0], uvel_mask_c.shape[1]))\n \n \"\"\"\n The first loop checks the value of the u and v velocitites. Notice that we get the\n values of two adjacent grid, since we're working with a C-grid.\n Visualizations of velocities in the C-grids(see below). So for a grid to be flagged identified\n as a land grid two U velocities and 2 V velocities need to be zero. The first loop makes all\n ocean grids 1 and land grids 0. \n ____ ____ ____ ____\n | V | V | \n | | | \n U T U T U\n | | | \n |____V____|_____V_____| \n \"\"\"\n \n for i in range (len(landmask[:,0])-1):\n for j in range (len(landmask[0,:])-1):\n u1 = uvel_mask_c[i,j]\n\n u2 = uvel_mask_c[i,j+1]\n\n v1 = vvel_mask_c[i,j]\n\n v2 = vvel_mask_c[i+1,j]\n\n if u1 != 0 or u2 != 0 or v1 != 0 or v2 != 0:\n landmask[i,j] = 1\n \n \n \"\"\"\n Change all zero to 1 and rest 0. since we want the land grids to be 1 and ocean\n grids to be 0. \n \"\"\"\n \n landmask = ChangeValues(landmask,0,1) \n \n \"\"\"\n The created landmask needs to be shifted upwards one grid. We will\n use the numpy roll function to do this.\n \"\"\"\n \n if test == True:\n plt.figure()\n plt.imshow(landmask)\n plt.colorbar()\n \n return landmask", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. 
for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def get_monthly_prism_ppt_data(year,month, plotPPTBounds):\n \"\"\" It is in the form of grid \"\"\"\n \n if(month<10):\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+\"0\"+str(month)+\"_bil.bil\"\n else:\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+str(month)+\"_bil.bil\" \n \n ppt_data = read_prism_bil(join(cf.root, cf.prism_dir, prism_file_path))\n \n hdr_dict = read_prism_hdr(join(cf.root, cf.prism_dir, prism_file_path).replace('.bil', '.hdr'))\n \n hdr_dict[\"ULXMAP\"] = float(hdr_dict[\"ULXMAP\"])\n hdr_dict[\"ULYMAP\"] = float(hdr_dict[\"ULYMAP\"])\n hdr_dict['NROWS'] = int(hdr_dict['NROWS'])\n hdr_dict['NCOLS'] = int(hdr_dict['NCOLS'])\n hdr_dict['XDIM'] = float(hdr_dict['XDIM'])\n hdr_dict['YDIM'] = float(hdr_dict['YDIM'])\n \n p1 = (hdr_dict[\"ULXMAP\"] - (hdr_dict['XDIM']/2), \n hdr_dict[\"ULYMAP\"] + (hdr_dict['YDIM']/2))\n\n p2 = (p1[0] + (hdr_dict['NCOLS']*hdr_dict['XDIM']),\n p1[1])\n\n p3 = (p2[0],\n p2[1] - (hdr_dict['NROWS']*hdr_dict['YDIM']))\n\n p4 = (p1[0],\n p3[1])\n \n lon_point_list = (p1[0], p2[0], p3[0], p4[0])\n lat_point_list = (p1[1], p2[1], p3[1], p4[1])\n \n ppt_bounds = Polygon(zip(lon_point_list, lat_point_list))\n \n 
if(plotPPTBounds):\n crs = {'init': 'epsg:4326'}\n m = folium.Map(zoom_start=10, tiles='cartodbpositron')\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[ppt_bounds]) \n \n folium.GeoJson(polygon).add_to(m)\n folium.LatLngPopup().add_to(m)\n m.save(\"Prism Bounds.html\")\n\n return ppt_bounds, ppt_data, hdr_dict", "def plot_LvsL_multiple(line1='CIV1548',line2='CIII1908',line1range=[1e3,1e8],line2range=[1e0,1e8],\n outputdir='./',verbose=True):\n modeldata = nm.load_model('combined',verbose=verbose)\n\n if verbose: print(' - Putting together permutations of chosen setups for plotting')\n infodic = {}\n infodic['Zgas'] = [False,0.0001,0.006,0.040], True\n infodic['logUs'] = [False,-1.0,-2.5,-4.0] , False\n infodic['xid'] = [False,0.1,0.3,0.5] , False\n infodic['nh'] = [False,10,100,1000,10000] , False\n infodic['CO'] = [False,0.1,0.38,1.4] , False\n infodic['Mcut'] = [False,100,300] , False\n\n variables = [infodic['Zgas'][0],infodic['logUs'][0],infodic['xid'][0],\n infodic['nh'][0],infodic['CO'][0],infodic['Mcut'][0]]\n\n permutations = list(itertools.product(*variables))\n permutations_with2false = [sublist for sublist in permutations if sublist.count(False) == 2.]\n Nplots = len(permutations_with2false)\n\n if verbose: print(' - With the restriction Nfalse=2 the setup will results in '+str(Nplots)+\\\n ' plots (if model data allows)')\n if verbose: print(' - These will be saved to the output directory: '+outputdir)\n for pp, perm in enumerate(permutations_with2false):\n Zval = perm[0]\n Uval = perm[1]\n Xival = perm[2]\n Nhval = perm[3]\n COval = perm[4]\n Mval = perm[5]\n\n plotname = outputdir+'NEOGALmodelgrid_Zgas'+str(Zval).replace('.','p')+\\\n '_logU'+str(Uval).replace('.','p')+\\\n '_xid'+str(Xival).replace('.','p')+\\\n '_nH'+str(Nhval).replace('.','p')+\\\n '_CO'+str(COval).replace('.','p')+\\\n '_Mcut'+str(Mval).replace('.','p')+'.pdf'\n\n plotname = plotname.replace('False','Free')\n\n if verbose:\n plotno = pp+1\n infostr = ' - Generating plot '+str(\"%.4d\" % plotno)+'/'+str(\"%.4d\" % Nplots)+': '+plotname.split('/')[-1]+' '\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n\n if not Zval:\n logp1 = True\n else:\n logp1 = False\n\n nm.plot_LvsL(modeldata,line1=line1,line2=line2,logx=True,logy=True,logp1=logp1,logp2=False,verbose=False,\n Zgas=Zval,logU=Uval,xid=Xival,nh=Nhval,COratio=COval,Mcutoff=Mval,\n fixxrange=line1range,fixyrange=line2range,plotname=plotname)\n\n print('\\n ... 
done')", "def gelplot_imshow(distances, bandwidths, intensities, lanes, names,\n gel_len, wellx, welly, wellsep, res, cursor_ovr,\n back_col, band_col, well_col, noise, Itol, title,\n FWTM, show=True):\n nlanes = len(lanes)\n gel_width = sum(wellx) + (nlanes+1)*wellsep # cm\n res = res.to('px/cm')\n pxl_x = int(round(gel_width * res))\n pxl_y = int(round(gel_len * res))\n centers = [(l+1)*wellsep + sum(wellx[:l]) + 0.5*wellx[l]\n for l in xrange(nlanes)]\n rgb_arr = np.zeros(shape=(pxl_y, pxl_x, 3), dtype=np.float32)\n bandlengths = wellx\n # Paint the bands\n for i in xrange(nlanes):\n distXmid = centers[i]\n pxlXmid = distXmid * res\n bandlength = bandlengths[i]\n from_x = int(round((distXmid - bandlength/2.0) * res))\n to_x = int(round((distXmid + bandlength/2.0) * res))\n for j in xrange(len(lanes[i])):\n distYmid = distances[i][j]\n pxlYmid = int(round(distYmid * res))\n bandwidth = bandwidths[i][j] # w=FWHM or w=FWTM ???\n if FWTM:\n FWHM = Gauss_FWHM(bandwidth)\n else:\n FWHM = bandwidth\n std_dev = Gauss_dev(FWHM)\n maxI = intensities[i][j]\n midI = Gaussian(distYmid, maxI, distYmid, std_dev)\n if pxlYmid < len(rgb_arr): # band within gel frontiers\n rgb_arr[pxlYmid, from_x:to_x] += midI\n bckwdYstop = False if pxlYmid > 0 else True\n forwdYstop = False if pxlYmid < len(rgb_arr)-1 else True\n pxlYbck = pxlYmid-1\n pxlYfor = pxlYmid+1\n while not bckwdYstop or not forwdYstop:\n if not bckwdYstop:\n distYbck = Q_(pxlYbck,'px')/res\n bckYI = Gaussian(distYbck, maxI, distYmid, std_dev)\n if pxlYbck < len(rgb_arr):\n rgb_arr[pxlYbck, from_x:to_x] += bckYI\n pxlYbck -= 1\n if bckYI <= Itol or pxlYbck == -1:\n bckwdYstop = True\n if not forwdYstop:\n distYfor = Q_(pxlYfor,'px')/res\n forYI = Gaussian(distYfor, maxI, distYmid, std_dev)\n rgb_arr[pxlYfor, from_x:to_x] += forYI\n pxlYfor += 1\n if forYI <= Itol or pxlYfor == pxl_y:\n forwdYstop = True\n # Background color\n if noise is None or noise <= 0:\n rgb_arr += back_col\n else:\n bckg = np.random.normal(back_col, noise,\n (len(rgb_arr), len(rgb_arr[0])))\n rgb_arr += bckg[:,:,np.newaxis]\n # Saturation\n rgb_arr[rgb_arr > 1] = 1\n rgb_arr[rgb_arr < 0] = 0\n #bands_arr = np.ma.masked_where(rgb_arr == back_col, rgb_arr) #############\n bands_arr = rgb_arr\n # Plot\n gel_len = gel_len.magnitude\n gel_width = gel_width.magnitude\n wellx = wellx.magnitude\n welly = welly.magnitude\n wellsep = wellsep.magnitude\n centers = [c.magnitude for c in centers]\n bandlengths = bandlengths.magnitude\n bandwidths = [[bw.magnitude for bw in bwlane] for bwlane in bandwidths]\n fig = plt.figure()\n ax1 = fig.add_subplot(111, axisbg=str(back_col))\n ax1.xaxis.tick_top()\n ax1.yaxis.set_ticks_position('left')\n ax1.spines['left'].set_position(('outward', 8))\n ax1.spines['left'].set_bounds(0, gel_len)\n ax1.spines['right'].set_visible(False)\n ax1.spines['bottom'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_color(str(back_col))\n ax1.spines['bottom'].set_color(str(back_col))\n ax1.xaxis.set_label_position('top')\n plt.xticks(centers, names)\n majorLocator = FixedLocator(range(int(gel_len+1)))\n minorLocator = FixedLocator([j/10.0 for k in range(0, int(gel_len+1)*10, 10)\n for j in range(1+k, 10+k, 1)])\n ax1.yaxis.set_major_locator(majorLocator)\n ax1.yaxis.set_minor_locator(minorLocator)\n ax1.tick_params(axis='x', which='both', top='off')\n bands_plt = ax1.imshow(bands_arr, extent=[0, gel_width, gel_len, 0],\n interpolation='none')\n # Draw wells\n for i in xrange(nlanes):\n ctr = centers[i]\n wx = wellx[i]\n wy = 
welly[i]\n ax1.fill_between(x=[ctr-wx/2, ctr+wx/2], y1=[0,0],\n y2=[-wy, -wy], color=str(well_col))\n # Invisible rectangles overlapping the bands for datacursor to detect\n bands = []\n for i in xrange(nlanes):\n bandlength = bandlengths[i]\n center = centers[i]\n x = center - bandlength/2.0\n for j in xrange(len(lanes[i])):\n dna_frag = lanes[i][j]\n bandwidth = bandwidths[i][j]\n dist = distances[i][j].magnitude\n y = dist - bandwidth/2.0\n band = plt.Rectangle((x,y), bandlength, bandwidth, fc='r',\n alpha=0, label='{} bp'.format(len(dna_frag)))\n plt.gca().add_patch(band)\n bands.append(band)\n plt.ylim(gel_len, -max(welly))\n xlim = sum(wellx) + (nlanes+1)*wellsep\n plt.xlim(0, xlim)\n plt.ylabel('Distance (cm)')\n plt.xlabel('Lanes')\n bbox_args = dict(boxstyle='round,pad=0.6', fc='none')\n an1 = plt.annotate(title, xy=(0,0),\n xytext=(xlim+0.4, (gel_len+max(welly))/2.0),\n va=\"center\", bbox=bbox_args)\n an1.draggable()\n plt.gca().set_aspect('equal', adjustable='box')\n cursor_args = dict(display='multiple',\n draggable=True,\n hover = False,\n bbox=dict(fc='white'),\n arrowprops=dict(arrowstyle='simple',\n fc='white', alpha=0.5),\n xytext=(15, -15),\n formatter='{label}'.format)\n if cursor_ovr:\n for key in cursor_ovr:\n cursor_args[key] = cursor_ovr[key]\n if cursor_args['hover'] == True: cursor_args['display'] = 'single'\n datacursor(bands, **cursor_args)\n #fig.savefig('example.png', dpi=300)\n if show: plt.show()\n return plt", "def makeGrid(self):\n self.h = self.step_x\n self.k = self.step_t\n self.t, self.x = np.meshgrid(np.arange(self.min_t, self.max_t, self.step_t), np.arange(self.min_x, self.max_x\n , self.step_x))", "def Map_Gradients(post_eval,q,InvV,m_points):\n m = InvV.n\n N = m_points.num\n d = InvV.d\n \n ds_dq = np.zeros([m,N])\n dr_dq = np.zeros([m,N])\n \n ds_db = np.zeros([m,d,N])\n dr_db = np.zeros([m,d,N])\n \n ds_dL = np.zeros([m,d,d,N])\n dr_dL = np.zeros([m,d,d,N])\n \n dB_dL = np.zeros([m,d,d,d,N])\n dM_dL = Cholesky_Derivs(InvV,m_points)\n Q = Partitioner(q, InvV, post_eval, m_points)\n \n for j in range(m):\n backtrack = m_points.map(InvV,j)\n ds_dq[j,:] = - Q[j,:] / q[j]\n dr_dq[j,:] = ds_dq[j,:] - np.mean(ds_dq[j,:])\n \n for k in range(d):\n ds_db[j,k,:] = Q[j,:] * backtrack.all[:,k].T\n dr_db[j,k,:] = ds_db[j,k,:] - np.mean(ds_db[j,k,:])\n \n for l in range(d):\n for i in range(N):\n for row in range(d):\n for col in range(d):\n dB_dL[j,row,k,l,i] += m_points.pick(i)[col] * dM_dL[j,row,col,k,l]\n ds_dL[j,k,l,i] = Q[j,i] * np.inner(backtrack.pick(i),dB_dL[j,:,k,l,i])\n if k == l:\n ds_dL[j,k,l,:] += (2/InvV.L[j,k,l])\n \n dr_dL[j,k,l,:] = ds_dL[j,k,l,:] - np.mean(ds_dL[j,k,l,:])\n \n return dr_dq, dr_db, dr_dL", "def showAsPointsInterpolated(self, lToRRatio = 2.0):\n MonkeyPatchMayaVi()\n import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n @mlab.show\n def _showSimple():\n maxInterpolPts = 10\n \n def interpolateSection(section):\n sStart = section.getDistalNPA4()\n sEnd = section.getProximalNPA4()\n length = section.getLength()\n rad = min(section.d_r, section.p_r) \n n = min( max( int( lToRRatio * length / rad ), 1 ), maxInterpolPts)\n jVecSteps = ( sEnd-sStart ) / n\n \n intPts = [ sStart + k*jVecSteps for k in range(0,n) ]\n return intPts \n \n lbs = []\n for morph in self.morphs:\n lb = Flatten( ListBuilderSectionVisitor(functor=interpolateSection, morph=morph ) () ) \n lbs.extend( lb )\n \n \n pts = numpy.array( lbs )\n\n x = pts[:, 0]\n y = pts[:, 1]\n z = pts[:, 2]\n s = pts[:, 3]\n \n mlab.points3d(x, y, z, s, 
colormap=self.colormap, scale_factor=self.scale_factor)\n mlab.outline()\n _showSimple()", "def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == 
\"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig", "def plot_attribute_surface(self, z_source, x_min = -3., x_max = 3., y_min = -5., y_max = 5., dim1=0, dim2=1, grid_res=0.5):\n # create the dataspace\n x1 = torch.arange(x_min, x_max, grid_res)\n x2 = torch.arange(y_min, y_max, grid_res)\n z1, z2 = torch.meshgrid([x1, x2])\n\n num_points = z1.size(0) * z1.size(1)\n\n # z = torch.randn(1, self.model.latent_space_dim)\n z = z_source\n \n z = z.repeat(num_points, 1)\n\n z[:, dim1] = z1.contiguous().view(1, -1)\n z[:, dim2] = z2.contiguous().view(1, -1)\n z = to_cuda_variable(z)\n # pass the points through the model decoder\n mini_batch_size = 1\n num_mini_batches = num_points // mini_batch_size\n\n nd_all = []\n nr_all = []\n rc_all = []\n aij_all = []\n # ie_all = []\n\n for i in tqdm(range(num_mini_batches)):\n\n # if i > 0:\n # break\n z_batch = z[i*mini_batch_size:(i+1)*mini_batch_size, :]\n dummy_score_tensor = to_cuda_variable(torch.zeros(z_batch.size(0), self.measure_seq_len))\n _, samples = self.model.decoder(\n z=z_batch,\n score_tensor=dummy_score_tensor,\n train=self.train\n )\n samples = samples.view(z_batch.size(0), -1)\n note_density = self.dataset.get_notes_density_in_measure(samples)\n note_range = self.dataset.get_note_range_of_measure(samples)\n rhy_complexity = self.dataset.get_rhy_complexity(samples)\n avg_interval_jump = self.dataset.get_average_pitch_interval_of_measure(samples)\n\n # interval_entropy = self.dataset.get_interval_entropy(samples)\n nd_all.append(note_density)\n nr_all.append(note_range)\n rc_all.append(rhy_complexity)\n aij_all.append(avg_interval_jump)\n # ie_all.append(interval_entropy)\n\n nd_all = to_numpy(torch.cat(nd_all, 0))\n nr_all = to_numpy(torch.cat(nr_all, 0))\n rc_all = to_numpy(torch.cat(rc_all, 0))\n aij_all = to_numpy(torch.cat(aij_all, 0))\n\n print(nd_all.shape)\n print(nr_all.shape)\n print(rc_all.shape)\n print(aij_all.shape)\n\n # ie_all = to_numpy(torch.cat(ie_all, 0))\n z = to_numpy(z)\n if self.trainer_config == '':\n reg_str = '[no_reg]'\n else:\n reg_str = self.trainer_config\n\n # filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_rhy_complexity_[' \\\n # + str(dim1) + ',' + str(dim2) + '].png'\n # self.plot_dim(z, rc_all, filename, dim1=dim1, dim2=dim2)\n\n # filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_note_range_[' \\\n # + str(dim1) + ',' + str(dim2) + '].png'\n # self.plot_dim(z, nr_all, filename, dim1=dim1, dim2=dim2)\n\n\n\n filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_note_density_[' \\\n + str(dim1) + ',' + str(dim2) + ']_3.png'\n self.plot_dim(z, nd_all, filename, dim1=dim1, dim2=dim2)\n\n filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_avg_interval_jump_[' \\\n + str(dim1) + ',' + str(dim2) + ']_3.png'\n self.plot_dim(z, aij_all, filename, dim1=dim1, dim2=dim2)", "def show_grid(self):\n\n if not os.path.exists(self.path_to_results):\n os.mkdir(self.path_to_results)\n\n fig = plt.figure()\n\n if self.show_points == 1:\n plt.scatter(self.x_list_grid, self.y_list_grid, c='blue')\n\n plt.plot(self.x_list_main, self.y_list_main,\n 'green', label='straight path')\n plt.plot(self.x_list, self.y_list, 'red', label='first path')\n plt.plot(self.x_list_filtered, self.y_list_filtered,\n 'blue', label='filtered 
path')\n plt.title('Paths')\n plt.ylabel('Latitude')\n plt.xlabel('Longitude')\n # plt.legend()\n\n fig.savefig(os.path.join(self.path_to_results, 'Paths.png'))", "def dbg():\n ds = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)\n cs = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)\n\n # Get segmentation matrices\n mean_box, mean_bg = np.loadtxt('segmentation_data/mean_box.txt', dtype='float64'), np.loadtxt('segmentation_data/mean_bg.txt', dtype='float64')\n cv_box, cv_bg = np.loadtxt('segmentation_data/covariance_box.txt', dtype='float64'), np.loadtxt('segmentation_data/covariance_bg.txt', dtype='float64')\n segmentation_array = [mean_box, mean_bg, cv_box, cv_bg]\n # superimpose all from the out directory\n for (d, c) in load_all_msg():\n d0, c0 = get_render_points(d, c, segmentation_array)\n ds = np.concatenate((ds, d0))\n cs = np.concatenate((cs, c0))\n\n v = pptk.viewer(ds)\n v.set(point_size=0.0001, phi=0, r=1, theta=0)\n v.set(lookat=np.array([0.0, 0.0, 0.0], dtype=np.float32))\n np_colors_filtered = cs.astype(float)\n np_colors_filtered /= 255\n np_colors_filtered = np.c_[np_colors_filtered, np.ones(np_colors_filtered.shape[0])]\n v.attributes(np_colors_filtered)", "def plot_points(self,recon_data,zoom=\"dynamic\",varname='wspd',barbs=False,scatter=False,\\\n ax=None,return_ax=False,prop={},map_prop={}):\n \n #Set default properties\n default_prop={'obs_colors':'plasma','obs_levels':np.arange(30,151,10),'sortby':varname,'linewidth':1.5,'ms':7.5}\n default_map_prop={'res':'m','land_color':'#FBF5EA','ocean_color':'#EDFBFF',\\\n 'linewidth':0.5,'linecolor':'k','figsize':(14,9),'dpi':200}\n \n #Initialize plot\n prop = self.add_prop(prop,default_prop)\n map_prop = self.add_prop(map_prop,default_map_prop)\n self.plot_init(ax,map_prop)\n \n #set default properties\n input_prop = prop\n input_map_prop = map_prop\n \n #error check\n if isinstance(zoom,str) == False:\n raise TypeError('Error: zoom must be of type str')\n \n #--------------------------------------------------------------------------------------\n \n #Keep record of lat/lon coordinate extrema\n max_lat = None\n min_lat = None\n max_lon = None\n min_lon = None\n\n #Check for storm type, then get data for storm\n if isinstance(recon_data,pd.core.frame.DataFrame):\n pass\n else:\n raise RuntimeError(\"Error: recon_data must be dataframe\")\n\n #Retrieve storm data\n lats = recon_data['lat']\n lons = recon_data['lon']\n\n #Add to coordinate extrema\n if max_lat == None:\n max_lat = max(lats)\n else:\n if max(lats) > max_lat: max_lat = max(lats)\n if min_lat == None:\n min_lat = min(lats)\n else:\n if min(lats) < min_lat: min_lat = min(lats)\n if max_lon == None:\n max_lon = max(lons)\n else:\n if max(lons) > max_lon: max_lon = max(lons)\n if min_lon == None:\n min_lon = min(lons)\n else:\n if min(lons) < min_lon: min_lon = min(lons)\n\n #Plot recon data as specified\n \n if barbs:\n \n dataSort = recon_data.sort_values(by='wspd').reset_index(drop=True)\n norm = mlib.colors.Normalize(vmin=min(prop['obs_levels']), vmax=max(prop['obs_levels']))\n cmap = mlib.cm.get_cmap(prop['obs_colors'])\n colors = cmap(norm(dataSort['wspd'].values))\n colors = [tuple(i) for i in colors]\n qv = plt.barbs(dataSort['lon'],dataSort['lat'],\\\n *uv_from_wdir(dataSort['wspd'],dataSort['wdir']),color=colors,length=5,linewidth=0.5)\n \n if scatter:\n \n dataSort = recon_data.sort_values(by=prop['sortby'],ascending=(prop['sortby']!='p_sfc')).reset_index(drop=True)\n prop['obs_levels']=np.linspace(min(dataSort[varname]),max(dataSort[varname]),256)\n cmap = 
mlib.cm.get_cmap(prop['obs_colors'])\n \n sc = plt.scatter(dataSort['lon'],dataSort['lat'],c=dataSort[varname],cmap = cmap,\\\n vmin=min(prop['obs_levels']), vmax=max(prop['obs_levels']), s=prop['ms'])\n\n #--------------------------------------------------------------------------------------\n \n #Pre-generated zooms\n if zoom in ['north_atlantic','conus','east_conus']:\n bound_w,bound_e,bound_s,bound_n = self.set_projection(zoom)\n \n #Storm-centered plot domain\n elif zoom == \"dynamic\":\n \n bound_w,bound_e,bound_s,bound_n = self.dynamic_map_extent(min_lon,max_lon,min_lat,max_lat)\n self.ax.set_extent([bound_w,bound_e,bound_s,bound_n], crs=ccrs.PlateCarree())\n \n #Custom plot domain\n else:\n \n #Check to ensure 3 slashes are provided\n if zoom.count(\"/\") != 3:\n raise ValueError(\"Error: Custom map projection bounds must be provided as 'west/east/south/north'\")\n else:\n try:\n bound_w,bound_e,bound_s,bound_n = zoom.split(\"/\")\n bound_w = float(bound_w)\n bound_e = float(bound_e)\n bound_s = float(bound_s)\n bound_n = float(bound_n)\n self.ax.set_extent([bound_w,bound_e,bound_s,bound_n], crs=ccrs.PlateCarree())\n except:\n raise ValueError(\"Error: Custom map projection bounds must be provided as 'west/east/south/north'\")\n \n #Determine number of lat/lon lines to use for parallels & meridians\n self.plot_lat_lon_lines([bound_w,bound_e,bound_s,bound_n])\n \n #--------------------------------------------------------------------------------------\n \n #Add left title\n dot = u\"\\u2022\"\n if barbs:\n vartitle = f'{dot} flight level wind'\n if scatter:\n if varname == 'sfmr':\n vartitle = f'{dot} SFMR surface wind'\n if varname == 'wspd':\n vartitle = f'{dot} flight level wind'\n if varname == 'p_sfc':\n vartitle = f'{dot} surface pressure'\n self.ax.set_title('Recon '+vartitle,loc='left',fontsize=17,fontweight='bold')\n\n #Add right title\n #max_ppf = max(PPF)\n start_date = dt.strftime(min(recon_data['time']),'%H:%M UTC %d %b %Y')\n end_date = dt.strftime(max(recon_data['time']),'%H:%M UTC %d %b %Y')\n self.ax.set_title(f'Start ... {start_date}\\nEnd ... 
{end_date}',loc='right',fontsize=13)\n\n #--------------------------------------------------------------------------------------\n \n #Add legend\n\n #Add colorbar\n \n #Return axis if specified, otherwise display figure\n if ax != None or return_ax == True:\n return self.ax,'/'.join([str(b) for b in [bound_w,bound_e,bound_s,bound_n]])\n else:\n plt.show()\n plt.close()", "def project_ranges(cb, msg, attributes):\n if skip(cb, msg, attributes):\n return msg\n\n plot = get_cb_plot(cb)\n x0, x1 = msg.get('x_range', (0, 1000))\n y0, y1 = msg.get('y_range', (0, 1000))\n extents = x0, y0, x1, y1\n x0, y0, x1, y1 = project_extents(extents, plot.projection,\n plot.current_frame.crs)\n coords = {'x_range': (x0, x1), 'y_range': (y0, y1)}\n return {k: v for k, v in coords.items() if k in attributes}", "def lap_mat(self):", "def gate2(self, *dim_ranges):\n def kd_tree_cache(data):\n points = self.get_points(*[r.dim for r in dim_ranges])\n data.tree = KDTree(points)\n global services\n data = services.cache((self, [r.dim for r in dim_ranges]), kd_tree_cache)\n rect = Rectangle(\n [r.min for r in dim_ranges],\n [r.max for r in dim_ranges])\n new_indices = data.tree.query_range(rect)\n return DataTable(self.data[new_indices], self.dims)", "def Gridding(vis,uvw,image_params,obs_params,pswf):\t\n\tref_freq = obs_params['ref_freq']/1e6\n\t#print 'ref freq =', ref_freq\n\tlat \t = obs_params['lat']\n\tch_width = obs_params['ch_width']\n\tDEC \t = obs_params['DEC']\n\tStokes = image_params['Stokes']\n\t\n\tprint '--------------Gridding X stokes--------------------'\n\txgrid_wt, xgrid_uv, N = gridder(vis[0],uvw,image_params,obs_params,pswf)\n\tprint '--------------Gridding Y stokes--------------------'\n\tygrid_wt, ygrid_uv, N = gridder(vis[1],uvw,image_params,obs_params,pswf)\n\n\tN = np.shape(xgrid_wt)[0]\n\tgrid_uv = np.zeros([N, N], dtype=complex)\n\tgrid_wt = np.zeros([N, N], dtype=complex)\n\t\n\tif Stokes == 'I':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# I = (XX+YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real + xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag + xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real + xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag + xgrid_wt.imag)/2\n\n\telif Stokes == 'Q':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# Q = (XX-YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real - xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag - xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real - xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag - xgrid_wt.imag)/2\n\n\tdty_image=np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(IF.pad_fft(grid_uv))))\n\tpsf_image=np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(IF.pad_fft(grid_wt))))\n\n\treturn dty_image, psf_image", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = 
[(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def plot_variables(self, val=None, roi=None) -> None:\n\n if val is None:\n val = self.history.MAP\n\n if roi is None:\n roi = np.array([0])\n elif np.isscalar:\n roi = np.array([roi])\n\n data = self.data\n\n dt = val.dt\n gain = val.gain\n states = val.states\n mu_flor = val.mu_flor\n mu_back = val.mu_back\n num_rois = val.num_rois\n num_load = val.num_load\n num_data = val.num_data\n num_states = val.num_states\n\n times = np.arange(num_data) * dt\n\n fig = plt.gcf()\n fig.clf()\n #fig.set_size_inches(12, 6)\n ax = np.empty((len(roi), 1), dtype=object)\n gs = gridspec.GridSpec(nrows=len(roi), ncols=1, figure=fig)\n ax[0, 0] = fig.add_subplot(gs[0, 0])\n for i in range(len(roi)-1):\n ax[i, 0] = fig.add_subplot(gs[i, 0], sharex=ax[0, 0])\n\n for i, r in enumerate(roi):\n\n brightness = mu_flor @ states_to_pops(states[r, :, :], num_states) + mu_back[r]\n\n ax[i, 0].set_title('roi {}: {} flors'.format(r, val.num_flor[r]))\n ax[i, 0].set_xlabel('time (s)')\n ax[i, 0].set_ylabel('brightness (ADU)')\n ax[i, 0].plot(times, data[r, :], color='g', label='data')\n ax[i, 0].plot(times, brightness * gain, color='b', label='sampled')\n ax[i, 0].legend()\n\n # plt.tight_layout()\n plt.pause(.1)\n\n return", "def show_dbscan():\n\n # simulate normal hourly data\n weekday = ([0.05, 0.95], 0.05) #bath, bed\n weekend = ([0.3, 0.7], 0.1)\n roomperwd, truelabelswd = make_blobs(n_samples=23, centers=weekday[0],\n cluster_std=weekday[1], random_state=0)\n roomperwe, truelabelswe = make_blobs(n_samples=8, centers=weekend[0],\n cluster_std=weekend[1], random_state=0)\n\n # combine modes\n roompers = np.vstack((roomperwd, roomperwe))\n\n # make positive and sum to one to simulate valid distribution\n for i in range(roompers.shape[0]):\n for j in range(roompers.shape[1]):\n if roompers[i, j] < 0:\n roompers[i, j] = 0\n roompersnorm = normalize(roompers, norm='l1')\n\n # simulate anomaly on most recent day where don't leave bedroom\n roompersnorm[-1, :] = np.array([0.8, 0.2])\n\n # detect outliers\n roompersdetector = HourlyRoomPercentageAnomalyDetection(roompersnorm, eps=0.3, min_samples=3)\n labels = roompersdetector.scale_and_proximity_cluster(eps=0.3, min_samples=3)\n\n # plot results\n plt.figure()\n seenflag1 = False; seenflag2 = False; seenflag3 = False;\n for i, label in enumerate(labels):\n if label == 0:\n if seenflag1:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro', label='Cluster 1')\n seenflag1 = True\n elif label == 1:\n if seenflag2:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx', label='Cluster 2')\n seenflag2 = True\n elif label == -1:\n 
if seenflag3:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^', label='Outlier')\n seenflag3 = True\n plt.legend(loc='lower left')\n plt.axis([0, 1, 0, 1])\n plt.show()", "def plot_field_uncertainties():\n\n resize_size = (1000, 1000)\n\n\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n if drug_name == 'DMSO':\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/30_hours/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n\n def transform(x):\n if type(x) is np.ndarray:\n x = change_array_lims(x)\n x = np.log(x)\n return x\n\n F_unc_vmin = -7\n F_unc_vmax = -4\n sigma_vmin = -5\n sigma_vmax = 0 #0.4\n sigma_unc_vmin = -6\n sigma_unc_vmax = -2\n\n fig_Fs = [plt.figure() for _ in range(3)]\n fig_uncertainty = plt.figure()\n sigma_lists, F_arrays = [], []\n for idx_fig, dir in enumerate(dirs):\n\n p_list = _load_and_resize_list(dir+'p_list_0.pickle')\n D_list = _load_and_resize_list(dir+'D_list_0.pickle')\n U_array = pickle.load(open(dir+'U.pickle', 'rb'))\n U_array = cv2.resize(U_array, resize_size, interpolation = cv2.INTER_LINEAR)\n Gx, Gy = np.gradient(U_array, 26./resize_size[0], 26./resize_size[0]) # gradients with respect to x and y\n F_array = (Gx**2+Gy**2)**.5 # gradient magnitude\n F_array[np.isinf(F_array)] = np.nan\n F_array[p_list[-1]<1e-3]=np.nan # final PDF\n sigma_list = []\n for j in range(9):\n arr = D_list[2*j] # current PDF\n arr[p_list[j]<1e-3]=np.nan\n sigma_list.append(np.sqrt(2*arr))\n\n\n sigma_lists.append(sigma_list)\n F_arrays.append(F_array)\n\n ax = fig_Fs[idx_fig].add_subplot(111)\n ax.imshow(transform(F_array)[::-1, :], cmap = cmap, vmin = -4.6, vmax = -2)\n ax.set_title(dir)\n\n all_axes = [i for j in fig_Fs for i in j.axes]\n for ax in all_axes:\n ax.axis('off')\n\n # uncertainties\n\n std = np.std(F_arrays, axis = 0)\n ax = fig_uncertainty.add_subplot(121)\n ax.imshow(transform(std)[::-1, :], cmap = cmap, vmin = F_unc_vmin, vmax = F_unc_vmax)\n ax.set_title('F_uncertainty')\n\n fig_sigma = plt.figure()\n ax = fig_sigma.add_subplot(111)\n ax.imshow(transform(np.nanmean(sigma_lists[0], axis = 0))[::-1, :], cmap = cmap, vmin = sigma_vmin, vmax = sigma_vmax) # index 0 (i.3 'original' is corresponds to the landscapes in other figures)\n ax.set_title('sigma_mean')\n\n sigma_means = [np.nanmean(sigma_list, axis = 0) for sigma_list in sigma_lists]\n std_array = np.nanstd(sigma_means, axis = 0)\n ax = fig_uncertainty.add_subplot(122)\n ax.imshow(transform(std_array)[::-1, :], cmap = cmap, vmin = sigma_unc_vmin, vmax = sigma_unc_vmax)\n ax.set_title('sigma_uncertainty')\n\n fig_sigma.savefig(path_to_here+'/../outputs/{}_mean_sigma.png'.format(drug_name), dpi = 1200)\n fig_uncertainty.savefig(path_to_here+'/../outputs/{}_uncertainties.png'.format(drug_name), dpi = 1200)", "def dump_step(self,status):\n super(vanderpol_output,self).dump_step(status)\n\n L = self.level\n\n oldcol = self.sframe\n # self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1],L.uend.pos.values[2])\n self.sframe = self.ax.scatter(L.uend.values[0],L.uend.values[1])\n # Remove old line collection before drawing\n # if oldcol is not None:\n # self.ax.collections.remove(oldcol)\n plt.pause(0.00001)\n\n return None", "def visibility(azmap, rmap, elmap, DEM_res, DEM_xmin, DEM_ymin, rad_x, rad_y,\n dr, daz, verbose=True):\n\n range_max = np.max(rmap)\n nr = int(range_max / 
dr)\n nrows = len(elmap)\n ncols = len(elmap[0])\n visib = np.zeros([nrows, ncols])\n minviselev = np.zeros([nrows, ncols])\n\n rr_start = dr / 2\n\n azmin = 0.\n azmax = 0.\n rrmin = 0.\n rrmax = 0.\n azmin_sin = 0.\n azmax_sin = 0.\n azmin_cos = 0.\n azmax_cos = 0.\n el_rmin_azmin = 0.\n el_rmin_azmax = 0.\n el_rmax_azmin = 0.\n el_rmax_azmax = 0.\n el_max = 0.\n el_max_prev = 0.\n\n kx_rmin_azmin = 0\n kx_rmin_azmax = 0\n kx_rmax_azmin = 0\n kx_rmax_azmax = 0\n ky_rmin_azmin = 0\n ky_rmin_azmax = 0\n ky_rmax_azmin = 0\n ky_rmax_azmax = 0\n\n radkx = int(np.round((rad_x - DEM_xmin) / DEM_res))\n radky = int(np.round((rad_y - DEM_ymin) / DEM_res))\n for kx in range(radkx - 1, radkx + 2):\n for ky in range(radky - 1, radky + 2):\n visib[ky, kx] = 100\n minviselev[ky, kx] = elmap[ky, kx]\n\n az_ = np.arange(0, 360 + daz, daz)\n\n for azind in range(len(az_)):\n if verbose:\n logging.info(f'Computing azimuth {az_[azind]:2.1f}')\n az = az_[azind]\n azmin = az - daz / 2.\n azmax = az + daz / 2.\n if azmin < 0:\n azmin = 360. + azmin\n indseta = np.logical_or(np.logical_and(azmap >= 0,\n azmap < azmax),\n np.logical_and(azmap >= azmin,\n azmap <= 360.))\n elif azmax > 360:\n azmax = azmax - 360.\n indseta = np.logical_or(np.logical_and(azmap >= azmin,\n azmap <= 360),\n np.logical_and(azmap >= 0,\n azmap < azmax))\n else:\n indseta = np.logical_and(azmap >= azmin,\n azmap < azmax)\n\n azmin_sin = np.sin(azmin * np.pi / 180.)\n azmax_sin = np.sin(azmax * np.pi / 180.)\n azmin_cos = np.cos(azmin * np.pi / 180.)\n azmax_cos = np.cos(azmax * np.pi / 180.)\n\n indseta = np.where(indseta)\n\n el_max_prev = -90\n for rind in range(nr):\n rr = rr_start + rind * dr\n\n rrmin = rr - dr / 2\n rrmax = rr + dr / 2\n\n if np.any(indseta):\n\n indsetr = np.logical_and(rmap[indseta] >= rrmin,\n rmap[indseta] < rrmax)\n\n indsetr = tuple([indseta[0][indsetr],\n indseta[1][indsetr]]) # Cells to set\n\n kx_rmin_azmin = radkx + \\\n int(round((rrmin * azmin_sin) / DEM_res))\n kx_rmin_azmax = radkx + \\\n int(round((rrmin * azmax_sin) / DEM_res))\n kx_rmax_azmin = radkx + \\\n int(round((rrmax * azmin_sin) / DEM_res))\n kx_rmax_azmax = radkx + \\\n int(round((rrmax * azmax_sin) / DEM_res))\n\n ky_rmin_azmin = radky + \\\n int(round((rrmin * azmin_cos) / DEM_res))\n ky_rmin_azmax = radky + \\\n int(round((rrmin * azmax_cos) / DEM_res))\n ky_rmax_azmin = radky + \\\n int(round((rrmax * azmin_cos) / DEM_res))\n ky_rmax_azmax = radky + \\\n int(round((rrmax * azmax_cos) / DEM_res))\n\n el_rmin_azmin = -90.\n el_rmin_azmax = -90.\n el_rmax_azmin = -90.\n el_rmax_azmax = -90.\n\n if ((kx_rmin_azmin >= 0) and (kx_rmin_azmin < ncols) and\n (ky_rmin_azmin >= 0) and (ky_rmin_azmin < nrows)):\n el_rmin_azmin = elmap[ky_rmin_azmin, kx_rmin_azmin]\n if ((kx_rmin_azmax >= 0) and (kx_rmin_azmax < ncols) and\n (ky_rmin_azmax >= 0) and (ky_rmin_azmax < nrows)):\n el_rmin_azmin = elmap[ky_rmin_azmax, kx_rmin_azmax]\n if ((kx_rmax_azmin >= 0) and (kx_rmax_azmin < ncols) and\n (ky_rmax_azmin >= 0) and (ky_rmax_azmin < nrows)):\n el_rmin_azmin = elmap[ky_rmax_azmin, kx_rmax_azmin]\n if ((kx_rmax_azmax >= 0) and (kx_rmax_azmax < ncols) and\n (ky_rmax_azmax >= 0) and (ky_rmax_azmax < nrows)):\n el_rmin_azmin = elmap[ky_rmax_azmax, kx_rmax_azmax]\n\n el_max = max([el_rmin_azmin, el_rmin_azmax,\n el_rmax_azmin, el_rmax_azmax])\n\n if np.any(indsetr):\n el_max = max([el_max, np.max(elmap[indsetr])])\n if el_max >= el_max_prev:\n visib[indsetr] = 100\n minviselev[indsetr] = el_max\n else:\n minviselev[indsetr] = el_max_prev\n\n el_max = 
max([el_max, el_max_prev])\n el_max_prev = el_max\n\n return visib, minviselev", "def rpoints(self):\n return self.gmap.interp_gpos(self.points)", "def map_sim_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n\n for gal_index in GR.N_gal - np.arange(GR.N_gal) - 1:\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='simgas')\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n\n # Plot\n Rmax = max_scale/2\n ax1 = axs[9 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.99*Rmax,0.99*Rmax])\n ax1.set_ylim([-0.99*Rmax,0.99*Rmax])\n if (p.prop == 'm') & (p.text == True):\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n #if counter == 0:\n # ax1 = plt.subplots(1, 1)\n #cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/')\n plt.savefig(p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format), format=p.format, dpi=250, facecolor='w')\n fignum += 1\n\n else:\n if p.add:\n fig, ax1 = plt.gcf(), p.ax\n if not p.add:\n fig = plt.figure(figsize=(8,6))\n ax1 = fig.add_axes([0.1, 0.01, 0.8, 0.8]) \n ax1.axis('equal')\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n if p.R_max:\n # Cut out square\n simgas = simgas[(np.abs(simgas.x) < p.R_max) & (np.abs(simgas.y) < p.R_max)]\n # Add bottom left corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = -p.R_max,-p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n # Add top right corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = p.R_max,p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n else:\n pass\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n print('Min and max of map: ',map2D.min(),map2D.max())\n #map2D[map2D < 1e4] = 1e6\n # Plot map\n if not p.R_max:\n p.R_max = max_scale/2\n if p.log: \n if not p.vmax: p.vmax = np.log10(map2D).max()\n if not p.vmin: p.vmin = 
np.log10(map2D).max() - 4\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n else:\n if not p.vmax: p.vmax = np.max(map2D)\n if not p.vmin: p.vmin = np.min(map2D) / 1e3\n map2D[map2D < p.vmin] = p.vmin #np.min(map2D[map2D > 0])\n map2D = np.flipud(map2D)\n\n im = ax1.imshow(map2D,\\\n extent=[-max_scale/2,max_scale/2,-max_scale/2,max_scale/2],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n zoom = 1#/1.5\n ax1.set_xlim([-1/zoom * p.R_max,1/zoom * p.R_max])\n ax1.set_ylim([-1/zoom * p.R_max,1/zoom * p.R_max])\n if p.colorbar: \n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im,cax=cax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n if (p.prop == 'm') & (p.text == True):\n simstar = aux.load_temp_file(gal_ob=gal_ob,data_type='simstar')\n ax1.text(0.05,0.92,'M$_{star}$=%.1e M$_{\\odot}$' % np.sum(simstar.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.86,'M$_{gas}$=%.1e M$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.80,'SFR=%.2f M$_{\\odot}$/yr' % GR.SFR[p.gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index), format=p.format, dpi=250, facecolor='w')\n\n if not p.colorbar: return(im)", "def main():\n \n # boundaries of the array\n n = 30\n m = 70\n \n # array generated\n arr = rangeArray(n,m)\n \n # Print section\n print(\"Array from {} to {}: {}\".format(n, m, arr))", "def show_path_2D(start, end, coordinates, polygons, clear = True):\n global L, N, delta_t\n\n # start interactive mode\n plt.ion()\n\n # crete eempty figure on which data will go and first subplot\n fig = plt.figure()\n\n # get into the correct time step\n for time_step in range(start, end):\n # list of colours used for animation\n colours = cm.rainbow(np.linspace(0, 1, N))\n\n # loop over each particle and colour\n for i in range(N):\n # plot x, y poistion of particle in a given colour and set axis to size of box\n plt.scatter(coordinates[time_step][i][0], coordinates[time_step][i][1], s = 1, color = 'r')\n\n # plot the object\n if i < M:\n polygon = np.array(polygons[time_step][i])\n # get the points of the polygon to plot it\n x, y = polygon.T\n\n # print(x, y)\n\n x = np.append(x, x[0])\n y = np.append(y, y[0])\n\n # print(x, y)\n\n # plot the polygon\n plt.plot(x , y)\n # plt.scatter(polygons_com[time_step][i][0], polygons_com[time_step][i][1], s = 5, color = 'g')\n\n if bound_cond == True:\n plt.axis([0, L, 0, L])\n plt.axis([0, L, 0, L])\n # plt.axis([-L*2, L*2, -L*2, L*2])\n\n # show graph\n plt.show()\n plt.pause(time_pause)\n\n # decide if you want to clear\n if clear == True:\n plt.clf()\n\n return None", "def make_plot(range_km, unfolded_phidp, refl, phidp, kdp, filename):\n\n from matplotlib import pyplot as plt\n\n fig = plt.figure(figsize=[10, 5])\n ax = fig.add_subplot(111)\n\n # filtered phidp and unfolded phidp\n (p1,) = ax.plot(range_km, phidp[\"data\"][0], \"b-\")\n (p2,) = ax.plot(range_km, unfolded_phidp[\"data\"][0], \"g-\")\n\n # set labels\n ax.set_ylim(0, 250)\n ax.set_ylabel(\"Differential phase shift (degrees)\")\n ax.set_xlabel(\"Range (km)\")\n\n # plot KDP and reflectivity on second axis\n ax2 = ax.twinx()\n 
(p3,) = ax2.plot(range_km, kdp[\"data\"][0], \"r-\")\n (p4,) = ax2.plot(range_km, refl[\"data\"][0] / 10.0)\n\n # decorate and save\n ax2.yaxis.grid(color=\"gray\", linestyle=\"dashed\")\n ax.legend(\n [p1, p2, p3, p4],\n [\"Filtered phiDP\", \"Unfolded phiDP\", \"KDP\", \"Z/10.0\"],\n loc=\"upper left\",\n )\n fig.savefig(filename)", "def P_AI_Rocky(in_dict):\n # START\n fs = 16\n plt.rc('font', size=fs)\n fig = plt.figure(figsize=(14,12))\n ds = nc.Dataset(in_dict['fn'])\n\n # PLOT CODE\n aa = [-122.8, -122.54, 47.92, 48.22]\n import cmocean\n cmap = cmocean.cm.balance\n # cmap = 'RdYlBu_r'\n\n from warnings import filterwarnings\n filterwarnings('ignore') # skip some warning messages\n \n # plot Code\n \n # calculate divergence and vorticity\n uu = ds['u'][0, -1, :, :]\n vv = ds['v'][0, -1, :, :]\n u = zfun.fillit(uu)\n v = zfun.fillit(vv)\n u[np.isnan(u)] = 0\n v[np.isnan(v)] = 0\n \n G = zrfun.get_basic_info(in_dict['fn'], only_G=True)\n \n dive = ((np.diff(u, axis=1)/G['DX'][:, 1:-1])[1:-1, :]\n + (np.diff(v, axis = 0)/G['DY'][1:-1, :])[:, 1:-1])\n #dive[G['mask_rho'][1:-1,1:-1]==False] = np.nan\n \n vort = np.diff(v, axis=1)/G['DX'][1:,1:] - np.diff(u, axis=0)/G['DY'][1:,1:]\n #vort[G['mask_rho'][1:,1:]==False] = np.nan\n \n scl = 2e-3\n \n # panel 1\n ax = fig.add_subplot(121)\n # cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], dive/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_rho'][1:-1,1:-1], G['lat_rho'][1:-1,1:-1], dive/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Divergence (%0.1e $s^{-1}$)' % (scl))\n #pfun.add_bathy_contours(ax, ds, txt=True)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_title(tstr)\n pfun.add_info(ax, in_dict['fn'])\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([48, 48.1, 48.2])\n #\n # panel 2\n ax = fig.add_subplot(122)\n # cs = plt.pcolormesh(G['lon_rho'], G['lat_rho'], vort/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], vort/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Vorticity (%0.1e $s^{-1}$)' % (scl))\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([])\n #fig.colorbar(cs)\n \n # Inset colorbar\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n cbaxes = inset_axes(ax, width=\"4%\", height=\"40%\", loc='lower left')\n fig.colorbar(cs, cax=cbaxes, orientation='vertical')\n \n #pfun.add_bathy_contours(ax, ds)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_title(tstr) \n \n #fig.tight_layout()\n # FINISH\n ds.close()\n if len(in_dict['fn_out']) > 0:\n plt.savefig(in_dict['fn_out'])\n plt.close()\n else:\n plt.show()\n plt.rcdefaults()", "def _plot_pwm_x_pos_matrix(pwm_x_pos, agg_vector):\n # extract and save out to tmp file\n\n\n # then plot in R\n\n \n return", "def print_cords(self):\n print('startX :', self.startX, ' ,startY :', self.startY, ' ,endX :', self.endX, ' ,endY :', self.endY)", "def DT(time_lvl = 0, date = 160924 ):\n \n #-------Customised color in RGB ------------\n C = [[232,232,230],#grey\n [203,203,203], #grey\n [161,161,161], #grey\n [130,130,130], #grey\n [149,53,229], #lillac, 39\t64\t197\t149,53,229\n [39,64,197], #blue dark,7,67,194\n [15,110,229], #blue\n [80,149,240], #blue\n [74,192,243], #blue\n [152,219,248], #blue\n [183,237,247], #blue\n [251,217,198], #redish\n [255,197,166], #redish\n [255,172,164], #redish\n [253,139,142], #redish\n [253,101,105], #redish\n 
[255,66,74], #redish\n [238,13,28], #red\n [214,78,166], #pink\n [214,102,201], \n [217,155,210],\n [216,181,211]]\n C = np.array( C )\n C = np.divide( C, 255. ) # RGB has to be between 0 and 1 in python\n #-----------------------------------------------------------\n \n fig = plt.figure()\n \n \n #-----Setting our map area and projection of interest-------\n m = Basemap( llcrnrlon = -90., llcrnrlat = 0., urcrnrlon = 50., urcrnrlat=70.,\\\n resolution = 'l', area_thresh = 10000., projection = 'merc' )\n #m = Basemap(width=11500000,height=8500000,resolution='l',projection='eqdc',\\\n # lat_1=07.,lat_2=40,lat_0=44,lon_0=-30.)\n #m = Basemap(width=190000,height=2200000,resolution='l', projection='tmerc',lon_0=-30,lat_0=44)\n \n map_area( m ) # ploting background\n path = \"gribs/\"\n file = path +\"DT_var.grib\"\n obj = pygrib.open( file )\n \n #-FETCHING ALL THE VALUES----------------------------------------\n #-----Potential temperature---------------------------------------\n lat, lon, data = get_data( obj,'Potential temperature', 2000, date, timelevel = time_lvl )\n contour_val = np.linspace( 264, 384, 22 ) #contours for potential tempeature\n plot_contourf( m, lat, lon, data, C, contour_val )\n \n #-----Relative vorticity, diff level------------------------------\n contour=[ 2.8E-4, 3.5E-4, 4.5E-4, 6.5E-4, 7.E-4, 7.5E-4, 8.E-4 ] #1.5E-4,2.5E-4]#\n lat, lon, data925 = get_data( obj, 'Vorticity (relative)', 925, date, timelevel = time_lvl )\n lat, lon, data900 = get_data( obj, 'Vorticity (relative)', 900, date, timelevel = time_lvl )\n lat, lon, data850 = get_data( obj, 'Vorticity (relative)', 850, date, timelevel = time_lvl )\n \n #->--->---->--mean value over height and filtering----------------\n data = np.sqrt( data900**2 + 2*data850**2 + data925**2 ) #Vertical \"average\", weightet values at 850hpa double.\n footprint = np.array([[0,0,0,1,1,1,1,0,0,0], #footprint=np.ones((3,10))\n [0,0,1,1,1,2,1,1,0,0],\n [1,1,1,2,2,1,2,1,1,1],\n [0,1,1,1,1,2,1,1,1,0],\n [0,0,1,1,1,1,1,1,0,0]])\n \n data = ndimage.generic_filter( data, np.mean, footprint = footprint, mode='wrap' )\n plot_contour( m, lat,lon, data,contour, clr = 'k' )\n \n #-----Wind barbs----------------------------------------------------\n lat, lon, data_u = get_data( obj , 'U component of wind', 2000, date, timelevel = time_lvl )\n lat, lon, data_v = get_data( obj , 'V component of wind', 2000, date, timelevel = time_lvl )\n plot_wind_bar( m, lat, lon, data_u, data_v )\n #-----------------------------------------------\n #-----------------------------------------------\n \n \n \n #-SAVE AND CLOSE----------------------------------------------------\n #------------------------------------------------------------------\n obj.close()\n if time_lvl == 0:\n t = \"0000\"\n elif time_lvl == 1:\n t = \"1200\"\n elif time_lvl == 2:\n t = \"1800\"\n else: \n t = \"t_not_set\"\n \n fig_name = \"DT/DT_\" + str( date ) + \"_\" + str( t )+ \".TIFF\" \n \n ax = plt.gca( )\n plt.rc( 'font', size = 6 )\n fig.set_size_inches( 12.80, 7.15 )\n \n fig.savefig( fig_name, dpi = 600 )\n plt.close( )\n #plt.show()\n #--------------------------\n #----------------------------", "def plot_priorsamps(meta):\n priorsamps = np.array(meta.priordist.sample_ps(len(meta.colors))[0])\n f = plt.figure(figsize=(5,10))\n sps_log = f.add_subplot(2,1,1)\n sps_lin = f.add_subplot(2,1,2)\n sps_log.set_title(meta.name)\n f.subplots_adjust(hspace=0, wspace=0)\n sps_log.set_ylabel(r'$\\ln[p(z|\\vec{\\theta})]$')\n sps_lin.set_xlabel(r'$z$')\n 
sps_lin.set_ylabel(r'$p(\\vec{\\theta})$')\n sps_log.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n sps_lin.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n plotstep(sps_log,meta.binends,meta.logintPz,l=r'Log Interim Prior $\\ln[p(z|\\vec{\\theta}^{0})$]')\n plotstep(sps_lin,meta.binends,meta.intPz,l=r'Interim Prior $p(z|\\vec{\\theta}^{0})$')\n for c in lrange(meta.colors):\n plotstep(sps_log,meta.binends,priorsamps[c]-np.log(meta.ngals),c=meta.colors[c])\n plotstep(sps_lin,meta.binends,np.exp(priorsamps[c]-np.log(meta.ngals)),c=meta.colors[c])\n sps_log.legend(loc='upper right',fontsize='x-small')\n sps_lin.legend(loc='upper right',fontsize='x-small')\n f.savefig(os.path.join(meta.topdir, 'priorsamps.pdf'),bbox_inches='tight', pad_inches = 0)\n return" ]
[ "0.59820175", "0.5681855", "0.5632588", "0.55993336", "0.5567278", "0.5558949", "0.55241627", "0.552349", "0.552066", "0.5517947", "0.5500038", "0.54207855", "0.5374178", "0.5372016", "0.5361266", "0.5345306", "0.534296", "0.5339282", "0.5336971", "0.5336114", "0.53311783", "0.53250515", "0.5303563", "0.52854484", "0.52657664", "0.5260444", "0.5255669", "0.52521664", "0.5249639", "0.52338564", "0.52273214", "0.5208867", "0.5204293", "0.5203738", "0.5203709", "0.51990944", "0.5198463", "0.5189345", "0.51891106", "0.51877826", "0.51775455", "0.5173777", "0.51733214", "0.51365614", "0.51300055", "0.5127636", "0.5125996", "0.5125799", "0.5124396", "0.5118249", "0.5117339", "0.5114051", "0.51085865", "0.50886434", "0.5084515", "0.5080295", "0.50787425", "0.50718594", "0.5069522", "0.50572467", "0.5050096", "0.50455785", "0.5045283", "0.50423145", "0.50403243", "0.5038993", "0.50376105", "0.503623", "0.5034436", "0.5032353", "0.50322396", "0.5029498", "0.50281894", "0.5025487", "0.50221056", "0.50219274", "0.50199455", "0.50182766", "0.5010649", "0.5003376", "0.5002664", "0.50013137", "0.49975017", "0.49896926", "0.49766287", "0.49725574", "0.49692842", "0.4958814", "0.4956542", "0.49552375", "0.4954655", "0.4950264", "0.4948868", "0.49478954", "0.49476597", "0.49474022", "0.49465522", "0.494328", "0.49414825", "0.49333733" ]
0.6764529
0
Generate random RGB colors for a given seed
def get_random_rgb(seed):
    random.seed(seed)
    r = random.randint(0, 255)
    g = random.randint(0, 255)
    b = random.randint(0, 255)
    return [r, g, b]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_color_gen():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]", "def random_color():\n colormode(255)\n return randint(0, 255), randint(0, 255), randint(0, 255)", "def _genRandomColor():\n b = random.randint(0, 255)\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n return (b, g, r)", "def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))", "def rand_color(x):\n random.seed(x)\n color = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n color = '#%02x%02x%02x' % color\n return color", "def random_color():\n\n rgbl=[255,0,0]\n random.shuffle(rgbl)\n return tuple(rgbl)", "def _random_color():\n return random.randint(0, 255)", "def getRandomColor():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return \"rgb(\" + str(r) + \", \" + str(g) + \", \" + str(b) +\")\"", "def randcolor():\r\n r = random(0.0, 1.0)\r\n g = random(0.0, 1.0)\r\n b = random(0.0, 1.0)\r\n return vec(r, g, b) # A color is a three-element vec\r", "def get_random_color():\n r=random.randint(0,255)\n g=random.randint(0,255)\n b=random.randint(0,255)\n return(r,g,b)", "def random_colour(rng: random.Random) -> TupleInt3:\n r = rng.randint(0, 255)\n g = rng.randint(0, 255)\n b = rng.randint(0, 255)\n return r, g, b", "def random_color():\n r = lambda: random.randint(0, 255)\n color = ('%02X%02X%02X' % (r(), r(), r()))\n return color", "def generate_colour():\n red = random.randrange(0, 256)\n green = random.randrange(0, 256)\n blue = random.randrange(0, 256)\n alpha = random.randrange(0, 256)\n return (red, green, blue, alpha)", "def getRandColor():\n\treturn (randrange(0,256), randrange(0,256), randrange(0,256))", "def create_color():\n r = random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n a = random.randint(0,255)\n return introcs.RGB(r,g,b,a)", "def create_random_color(self):\n # Create a list of n colors.\n n = 4\n dc = 1.0 / (n-1)\n color_list = [i*dc for i in range(n)]\n\n if self.is_scaffold:\n rgb = [1.0, 1.0, 1.0]\n else:\n rgb = [random.choice(color_list) for i in range(3)]\n # Don't generate blue (that's for a scaffold in cadnano) or black.\n if (rgb[0] == 0.0) and (rgb[1] == 0.0):\n rgb[0] = random.choice(color_list[1:])\n if rgb[2] == 0.0: \n rgb[2] = random.choice(color_list[1:]) \n #__if (rgb[0] == 0) and (rgb[1] == 0)\n #__if self.is_scaffold\n return rgb", "def random_color():\n return random.choice(colors)", "def random_color() -> Tuple[int, int, int]:\n return randrange(0, 255), randrange(0, 255), randrange(0, 255)", "def Randomize(seed=None):\n random.seed()", "def random_color(num):\n # 为每个类别的边界框随机匹配相应颜色\n np.random.seed(80)\n COLORS = np.random.randint(0, 256, size=(num, 3), dtype='uint8') #\n return COLORS", "def randomcolour(self):\n r = random.randrange(1, 255)\n g = random.randrange(1, 255)\n b = random.randrange(1, 255)\n self.colour((r,g,b))", "def random_color() -> Tuple[int, ...]:\n red = random.randrange(0, 255)\n blue = random.randrange(0, 255)\n green = random.randrange(0, 255)\n return (red, blue, green)", "def random_hex_color():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n\n return '#%x%x%x' % (r, g, b)", "def randColor():\r\n return np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3))", "def generateColor(text):\n random.seed(text)\n return ('#%06X' % random.randint(0,0xFFFFFF))", "def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]", 
"def _rand_color(self):\n\n return self._rand_elem(COLOR_NAMES)", "def mutate_color(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_color(mutated_genome)\n elif seed == 1:\n change_color(mutated_genome)\n else: #seed == 2:\n switch_colors(mutated_genome)\n #else: seed == 3: # depricated\n # shuffle_colors(mutated_genome)", "def random_color():\n c = colors[random.randint(0, len(colors) - 1)]\n return c[1]", "def random_colors(N,bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i/N,1,brightness)for i in range(N)]\n colors = list(map(lambda c: clolorsys.hsv_to_rgb(*c),hsv))\n random.shuffle(colors)\n return colors", "def color_from_seed(seed):\n supported_colors = []\n for name, hex in matplotlib.colors.cnames.items():\n supported_colors.append(hex)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n selection = ascii_character_sum % len(supported_colors)\n \n return supported_colors[selection]", "def _random_color(self):\n levels = range(0, 256)\n return tuple(random.choice(levels) for _ in range(3))", "def random():\n np.random.seed(1939)", "def change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "def get_random_color():\n def get_random_value():\n \"\"\" Return a random value between 0.0 and 1.0 \"\"\"\n return randint(0, 255) / 256.0\n return Vector(get_random_value(), get_random_value(), get_random_value())", "def rand_branch_color():\n red = random.randint(0, 100)\n green = random.randint(175, 255)\n blue = random.randint(0, 100)\n return (red, green, blue)", "def generate_rgb(S, R_possible_values, R_probabilities):\n R, = rd.choices(population=R_possible_values, weights=R_probabilities)\n S -= R\n G = rd.randint(max(S - 255, 0), min(S, 255))\n S -= G\n B = S\n return (R, G, B)", "def _get_random_color(self):\r\n random_number = random.random()\r\n if random_number <= 1 / 3.0:\r\n return Color.RED\r\n else:\r\n return Color.BLACK", "def get_random_color():\n\n def get_random_value():\n \"\"\" Return a random value between 0.0 and 1.0 \"\"\"\n return randint(0, 255) / 256.0\n\n return Vector(get_random_value(), get_random_value(), get_random_value())", "def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]", "def random_rgb() -> List[int, int, int]:\n hsl_color = (random.random(), 0.3, 0.8)\n rgb_color = colorsys.hls_to_rgb(*hsl_color)\n return [round(c * 255) for c in rgb_color]", "def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def randcolour():\n colour = [0,0,0]\n while sum(colour)<450:\n for i in range(3):\n colour[i] = int(random.random()*255)\n return(tuple(colour))", "def get_colour(value):\n \n np.random.seed(value)\n colour = [np.random.uniform() for i in range(3)]\n return (tuple(colour))", "def color_from_ind(i: int) -> np.ndarray:\n 
np.random.seed(i)\n return np.random.random(3)", "def random_colors(n, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / n, 1, brightness) for i in range(n)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def random_colors(N, bright=True):\n import random\n import colorsys\n\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def randcol():\n col = [randint(0,255) for _ in range(3)]\n return gfx.Color(tuple(col))", "def randomRGBValue(self):\n return random.randrange(0, 256)", "def setRandomColor():\n setColor(getRandomColor())", "def random_colors(self, N, bright=True):\r\n brightness = 1.0 if bright else 0.7\r\n hsv = [(i / N, 1, brightness) for i in range(N)]\r\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\r\n random.shuffle(colors)\r\n return colors", "def initialize_randomness(seed):", "def random():\n np.random.seed(0)", "def new_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(0,255)\n color_green = random.randint(0,255)\n color_blue = random.randint(0,255)\n color = (color_red, color_blue, color_green)\n else: #color_mode == 'L':\n color = random.randint(0,255)\n mutated_genome[index][0] = color", "def random_colors(N, bright=True):\n import random\n import colorsys\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def seed(self, seed=None) -> List[np.ndarray]:\n self.np_random, seed_0 = seeding.np_random(seed)\n seed_1 = seeding.hash_seed(seed_0 + 1) % 2 ** 31\n return [seed_0, seed_1]", "def seed (self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def get_seed(seed=None):\n # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM\n random = np.random.RandomState(seed)\n return random.randint(0, 2147483647)", "def shuffle_colors(mutated_genome):\n mutated_genome", "def randcolor(self, left_edge, right_edge):\n color_pixel = (\n random.randint(\n left_edge, right_edge), random.randint(\n left_edge, right_edge), random.randint(\n left_edge, right_edge))\n return color_pixel", "def get_random_rgb(cls):\n\n hex = cls.get_random_hex(return_sign=False)\n return cls.rgb_from_hex(hex)", "def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def mutate_color(color):\n color[random.randrange(0, 3)] = random.random() % 1\n return color", "def rand_bottom_color():\n red = random.randint(120, 160)\n green = random.randint(0, 90)\n blue = random.randint(0, 40)\n return (red, green, blue)", "def seed(self, seed=None):\n self._np_random, seed = seeding.np_random(seed)\n return [seed]", "def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret", "def makeRGB(ncol = 16, minc = 32, maxc = 216):\n subd = int((maxc - minc)/ncol)\n numpy.random.seed(1)\n RGB = [[]]\n for r in range(minc, maxc, subd):\n for g in range(minc, maxc, subd):\n for b in range(minc, maxc, subd):\n RGB.append(numpy.array([r,g,b]))\n #print \"# of colors: \", len(self.RGB)\n rgb_order = 
numpy.random.permutation(len(RGB)) # randomize the order\n RGB = [RGB[x] for x in rgb_order]\n return RGB", "def color(step: int=10) -> Tuple[int, int, int]:\n # Randomly seed the r g b values\n r, g, b = (random_uniform(0, 255), random_uniform(0, 255),\n random_uniform(0, 255))\n\n # Randomly determine if each r g and b value is increasing or not\n r_inc = True\n g_inc = True\n b_inc = True\n r_step = random_uniform(step)\n g_step = random_uniform(step)\n b_step = random_uniform(step)\n\n # Yield the initial r, g, b values\n yield r, g, b\n\n # Loop and yeild forever\n while True:\n # If r is increasing\n if r_inc:\n # Increment r by the step\n r += r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to decreasing\n r_inc = r < 255 - r_step\n # If r is decreasing\n else:\n # Decrement r by the step\n r -= r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to increasing\n r_inc = r < r_step\n\n # See above\n if g_inc:\n g += g_step\n g_inc = g < 255 - g_step\n else:\n g -= g_step\n g_inc = g < g_step\n\n # See above\n if b_inc:\n b += b_step\n b_inc = b < 255 - b_step\n else:\n b -= b_step\n b_inc = b < b_step\n\n # Yield the red, green, and blue values\n yield r, g, b", "def random_color_func(word=None, font_size=None, position=None,\n orientation=None, font_path=None, random_state=None):\n if random_state is None:\n random_state = Random()\n return \"hsl(%d, 80%%, 50%%)\" % random_state.randint(0, 255)", "def get_random_color(color_list, **kwargs):\n color = None\n n_color_candidates = kwargs.get('n_color_candidates', 10)\n color_candidates_matrix = np.random.rand(n_color_candidates, 3) # creating matrix of candidate rgb values\n norm = 0.\n for i in range(n_color_candidates):\n candidate_color = color_candidates_matrix[i]\n candidate_norm = np.min([np.linalg.norm(existing_color-candidate_color) for existing_color in color_list])\n if candidate_norm > norm:\n norm = candidate_norm\n color = candidate_color\n color_list.append(color)\n return color", "def _seed(self, seed=None):\n self.rng, seed = seeding.np_random(seed)\n return [seed]", "def seed(self, seed=None):\n # to have a different environment at each time (resolve python random problem)\n self.np_random, seed1 = seeding.np_random(seed)\n seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31\n return [seed1, seed2]", "def generate_rgb(exist_colors: List[List[int, int, int]]) -> List[int, int, int]:\n largest_min_distance = 0\n best_color = random_rgb()\n if len(exist_colors) > 0:\n for _ in range(100):\n color = random_rgb()\n current_min_distance = min(_color_distance(color, c) for c in exist_colors)\n if current_min_distance > largest_min_distance:\n largest_min_distance = current_min_distance\n best_color = color\n _validate_color(best_color)\n return best_color", "def get_color_list(cluster_count):\n color_list = []\n for i in xrange(cluster_count):\n color_list.append(random_color_gen())\n return color_list", "def get_color_codes(num_classes):\r\n np.random.seed(RANDOM_SEED)\r\n return np.random.randint(0,np.iinfo(np.uint8).max, size=(num_classes, len('RGB'))).astype(np.uint8)", "def get_random(self):\n\n\t\treturn np.random.choice(\n\t\t\t\tself.Ncolors, size=self.codelength, \n\t\t\t\treplace=True, p=self.prior) + 1", "def random_color(search=None): \r\n if search: c = choice(search_color(search))\r\n else: c = choice(THECOLORS.values())\r\n \r\n #debug: print type(c), c # returns Color()\r\n return c \r\n #todo: exception on color search fail? 
OR just default to white.\r", "def desaturated_randcol(saturation,brightness=100):\n # begin with greyscale\n brightness = clamp(brightness,40,255)\n sat = clamp(saturation,0,255-brightness)\n col = [brightness,brightness,brightness]\n \n for i in range(3):\n r = randint(-sat,sat)\n if abs(r-brightness) < sat//2: # minimum saturation = sat / 2\n if r < 0:\n r = randint(-sat,-sat//2)\n else:\n r = randint(sat//2,sat)\n col[i] += r\n col[i] = clamp(col[i],50,253)\n\n return gfx.Color(col)", "def seed():", "def seed(self, seed=None):\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n return [seed]", "def i_seed(seed, flag):\n global randrsl, mm\n\n for i in range(0 , 256):\n mm[i] = 0\n\n m = len(seed)\n\n for i in range(0, 256):\n if i >= m:\n randrsl[i] = 0\n else:\n randrsl[i] = seed[i]\n\n rand_init(flag)", "def randomSub(seed: float):\n crc = str(string.ascii_letters + string.digits)\n random.seed(seed)\n n = random.randint(10,30)\n return \"\".join(random.sample(crc, n))", "def random_seed(seed):\n state = RandomState()\n random.seed(seed) # alter state\n np.random.seed(seed)\n torch.manual_seed(seed)\n yield\n state.set_global()", "def pastel_randcol():\n # begin with saturated then desaturate\n col = saturated_randcol()\n offset = 175\n \n for i in range(3):\n if col[i] == 255:\n continue\n elif col[i] != 0:\n col[i] = min(col[i]+offset,255)\n else:\n col[i] += offset\n \n return gfx.Color(tuple(col))", "def random_seed(i): # -> None:\n ...", "def seed(seed: int) -> None:\n ...", "def seed():\n pass", "def seed():\n pass", "def uniqueish_color():\n return plt.cm.gist_ncar(np.random.random())", "def temp_seed(cntxt_seed):\n state = np.random.get_state()\n np.random.seed(cntxt_seed)\n try:\n yield\n finally:\n np.random.set_state(state)", "def set_seed(cls, seed: Any) -> None:\n cls.rand = Random(seed)", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def saturated_randcol(value=255):\n # one channel is zero, one is random and one is max\n channels = [0,randint(0,255),255]\n col = [0,0,0]\n value = clamp(value,40,255)\n \n for i in range(3):\n channel = choice(channels)\n col[i] = channel*value//255\n channels.remove(channel)\n \n return gfx.Color(tuple(col))", "def square_parameters_from_seed(seed):\n # canvas\n canvas_mode = \"RGB\"\n canvas_size_in_pixels = (200, 200)\n canvas_background_color_rgb = (255, 255, 255) # white\n \n # rectangle\n rectangle_position = position_from_seed(seed) # ((0, 0), (10, 10))\n rectangle_fill = color_from_seed(seed)\n rectangle_outline = color_from_seed(seed + str(random.randint(1, 100))) # offset\n \n return canvas_mode, canvas_size_in_pixels, canvas_background_color_rgb, rectangle_position, rectangle_fill, rectangle_outline", "def get_random_value():\n return randint(0, 255) / 256.0", "def test_gen_colors(self):\n result = magic.gen_colors(\"tests/test_files/test.jpg\")\n self.assertEqual(result[0], \"#0F191A\")", "def getrandomcolor(self) -> str:\n return self.tab10[random.randint(0, len(self.tab10)-1)]" ]
[ "0.814636", "0.7701456", "0.76977193", "0.74837154", "0.746802", "0.737749", "0.73708177", "0.7207782", "0.7160151", "0.7154133", "0.7105417", "0.70931727", "0.70826614", "0.7022558", "0.7013978", "0.69533306", "0.6892857", "0.68925637", "0.6831503", "0.6779909", "0.67476416", "0.6726678", "0.6701285", "0.66899127", "0.6683659", "0.6679428", "0.6672197", "0.6656367", "0.6653879", "0.66413194", "0.6638635", "0.6619267", "0.6603917", "0.658105", "0.65284777", "0.6504878", "0.650209", "0.64836997", "0.648228", "0.6470852", "0.64601165", "0.6445245", "0.6445245", "0.6445245", "0.64337254", "0.6430013", "0.64141977", "0.641023", "0.6409011", "0.6364031", "0.6353899", "0.6301589", "0.6294633", "0.62868416", "0.6269651", "0.62652266", "0.6231465", "0.6223594", "0.6214282", "0.62015593", "0.6200679", "0.6198754", "0.6191586", "0.6184998", "0.6184998", "0.61633676", "0.616245", "0.6156887", "0.6153022", "0.61471486", "0.61244947", "0.61156344", "0.6112134", "0.6105534", "0.61050963", "0.6074193", "0.6054725", "0.60418105", "0.6039249", "0.60379577", "0.60367453", "0.6024583", "0.6014824", "0.59957844", "0.59844625", "0.59741324", "0.5965011", "0.59432644", "0.59372824", "0.59296453", "0.59296453", "0.59088427", "0.5881518", "0.5855556", "0.5855021", "0.5851355", "0.5816542", "0.580459", "0.579796", "0.5794206" ]
0.84251815
0
Get a binary mask with threshold on probabilities
def threshold_mask(mask, threshold=0.5):
    mask[np.where(mask >= threshold)] = 1.
    mask[np.where(mask < threshold)] = 0.
    return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))", "def pred_from_prob(a,threshold):\n bin_preds = np.zeros((np.size(a,0),))\n bin_preds[np.where(a[:,1]>threshold)]=1.0\n return bin_preds", "def _np_get_mask(prob_map, prob_thresh=0.5):\n mask = (prob_map > prob_thresh) * 255\n return mask.astype(np.uint8)", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def get_threshold_mask(hparams, x):\n\n axis = list(range(1, x.shape.ndims))\n min_val = tf.reduce_min(x, axis=axis, keepdims=True)\n max_val = tf.reduce_max(x, axis=axis, keepdims=True)\n thresh = min_val + hparams.threshold_factor * (max_val - min_val)\n cond = tf.less(x, thresh)\n return tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))", "def get_biomass(binary_mask):\n\n white_pixels = cv2.countNonZero(binary_mask)\n return white_pixels", "def preprocess_mask(y):\n y[y <= 255./2] = 0 # Needs to be in this order, otherwise 1 gets overwritten\n y[y > 255./2] = 1\n binary_mask = y.astype(np.uint8)\n\n return binary_mask", "def bin_thresh(img: np.ndarray, thresh: Number) -> np.ndarray:\n res = img >= thresh\n return res", "def create_binary_mask(self, type='negative'):\n if not self.thresh_map_name:\n return None\n mode = self.thresh_mode\n limits = self.thresh_limits\n map = self.map_scalars\n if mode=='mask lower':\n m = (map < limits[0]) if type=='negative' else (map >= limits[0])\n elif mode=='mask higher':\n m = (map > limits[1]) if type=='negative' else (map <= limits[1])\n elif mode=='mask between':\n m = ( (map > limits[0]) & (map < limits[1]) ) \\\n if type=='negative' \\\n else ( (map <= limits[0]) | (map >= limits[1]) )\n else: # mask outside\n m = ( (map < limits[0]) | (map > limits[1]) ) \\\n if type=='negative' \\\n else ( (map >= limits[0]) & (map <= limits[1]) )\n return m", "def predict_mask(logit, EMPTY_THRESHOLD, MASK_THRESHOLD):\n #pred mask 0-1 pixel-wise\n #n = logit.shape[0]\n IMG_SIZE = logit.shape[-1] #256\n #EMPTY_THRESHOLD = 100.0*(IMG_SIZE/128.0)**2 #count of predicted mask pixles<threshold, predict as empty mask image\n #MASK_THRESHOLD = 0.22\n #logit = torch.sigmoid(torch.from_numpy(logit)).view(n, -1)\n #pred = (logit>MASK_THRESHOLD).long()\n #pred[pred.sum(dim=1) < EMPTY_THRESHOLD, ] = 0 #bug here, found it, the bug is input shape is (256, 256) not (16,256,256)\n logit = sigmoid(logit)#.reshape(n, -1)\n pred = (logit>MASK_THRESHOLD).astype(np.int)\n if pred.sum() < EMPTY_THRESHOLD:\n return np.zeros(pred.shape).astype(np.int)\n else:\n return pred", "def get_sample_mask(self):", "def threshold_probs(probs):\n classes = np.ones(len(probs),)\n classes[probs < 0.5] = 0\n return classes", "def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask", "def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]", "def fim_mask(model, dataset, samples, threshold):\n fisher_diagonal = fisher_matrix(model, dataset, samples)\n mask = [tensor < threshold for tensor in fisher_diagonal]\n return mask", "def get_dropout_mask(dropout_probability: float, tensor_for_masking: mx.ndarray.ndarray.NDArray):\n binary_mask = mx.nd.random.uniform(0, 1, tensor_for_masking.shape) > dropout_probability\n # Scale mask by 1/keep_prob to 
preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask", "def binary_array(array, thresh, value=0):\n if value == 0:\n # Create an array of ones with the same shape and type as \n # the input 2D array.\n binary = np.ones_like(array) \n \n else:\n # Creates an array of zeros with the same shape and type as \n # the input 2D array.\n binary = np.zeros_like(array) \n value = 1\n \n # If value == 0, make all values in binary equal to 0 if the \n # corresponding value in the input array is between the threshold \n # (inclusive). Otherwise, the value remains as 1. Therefore, the pixels \n # with the high Sobel derivative values (i.e. sharp pixel intensity \n # discontinuities) will have 0 in the corresponding cell of binary.\n binary[(array >= thresh[0]) & (array <= thresh[1])] = value\n \n return binary", "def get_binary_mask(op_weights):\n return op_weights[\"mask\"]", "def make_bw(im, th=150):\n im_gray = np.mean(im, axis=2)\n im_binary = im_gray > th\n boolean_to_numbers = lambda b: 1 if b else -1\n v_boolean_to_numbers = np.vectorize(boolean_to_numbers)\n return v_boolean_to_numbers(im_binary)", "def preds_to_binary(pred_arr, channel_scaling=None, bg_threshold=0):\n pred_arr = _check_skimage_im_load(pred_arr).copy()\n\n if len(pred_arr.shape) == 3:\n if pred_arr.shape[0] < pred_arr.shape[-1]:\n pred_arr = np.moveaxis(pred_arr, 0, -1)\n if channel_scaling is None: # if scale values weren't provided\n channel_scaling = np.ones(shape=(pred_arr.shape[-1]),\n dtype='float')\n pred_arr = np.sum(pred_arr*np.array(channel_scaling), axis=-1)\n\n mask_arr = (pred_arr > bg_threshold).astype('uint8')\n\n return mask_arr*255", "def preds_to_binary(pred_arr, channel_scaling=None, bg_threshold=0):\n pred_arr = _check_skimage_im_load(pred_arr).copy()\n\n if len(pred_arr.shape) == 3:\n if pred_arr.shape[0] < pred_arr.shape[-1]:\n pred_arr = np.moveaxis(pred_arr, 0, -1)\n if channel_scaling is None: # if scale values weren't provided\n channel_scaling = np.ones(shape=(pred_arr.shape[-1]),\n dtype='float')\n pred_arr = np.sum(pred_arr*np.array(channel_scaling), axis=-1)\n\n mask_arr = (pred_arr > bg_threshold).astype('uint8')\n\n return mask_arr*255", "def scale(img):\n result = [[1 if x > BINARY_THRESHOLD else 0 for x in row] for row in img]\n return result", "def make_mask(image_shape, fieldmap, activation_data,\n sigma=0.2, threshold=0.9, alpha=0.1):\n offset, shape, step = fieldmap\n activations = numpy.zeros(image_shape)\n activations[_centered_slice(fieldmap, activation_data.shape)] = (\n activation_data)\n blurred = gaussian_filter(\n activations, sigma=tuple(s * sigma for s in shape), mode='constant')\n maximum = blurred.flatten().max()\n return 1 - (1 - alpha) * (blurred < maximum * 0.9)", "def test_soft_threshold_array():\n a = np.array([10, -10, 200, -200])\n np.testing.assert_allclose(snet.soft_threshold(a, 100),\n np.array([0, 0, 100, -100]))\n np.testing.assert_allclose(snet.soft_threshold(a, 3),\n np.array([7, -7, 197, -197]))", "def bin_random_mat(m,n,p_0 = 0.5):\n\treturn np.array((np.random.randn(m,n) >= p_0), dtype = np.float)", "def MovigAverageBinaryThresholding(image, kernel_sigma, n, b): \n\n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n nr, nc = image.shape;\n mask = np.zeros(image.shape);\n \n for ii in range(0, nr):\n for jj in range(0, nc):\n if jj < n:\n nb_mean = image[ii, 0:jj + 1].sum() / n;\n else:\n nb = image[ii, jj - n + 1:jj + 1];\n nb_mean = np.mean(nb);\n if image[ii, jj] > b * 
nb_mean:\n mask[ii, jj] = 1; \n \n return mask;", "def global_threshold(img, threshold_method):\n pass", "def get_dropout_mask(\n dropout_probability: float, tensor_for_masking: torch.Tensor\n): # pragma: no cover\n binary_mask = tensor_for_masking.new_tensor(\n torch.rand(tensor_for_masking.size()) > dropout_probability\n )\n # Scale mask by 1/keep_prob to preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask", "def generate_mask(self, thresh=50, b_ground=None):\n img = self.load_image()\n thresh = np.zeros(img.shape, \"uint8\")\n if b_ground is not None:\n img = img - b_ground\n thresh[img > 25] = 255\n mask = ndimage.morphology.binary_dilation(thresh).astype(\"uint8\")\n self.mask = 255*mask", "def binarize(X, *, threshold=..., copy=...):\n ...", "def gen_background_mask( img ):\n\t\t\n\tif len( img.shape ) == 3: t = img[0]\n\telif len( img.shape ) == 2: t = img\n\n\tmask = img > filters.threshold_li(t)\n\n\treturn mask", "def get_predict(prediction, threshold):\n\n prediction[prediction < threshold] = 0\n prediction[prediction >= threshold] = 1\n \n return prediction", "def thresholding(pred,label,thres):\n \n conf =[]\n \n for i in thres:\n \n pr_th,lab_th = (pred>i),(label>i)\n conf += confusion(pr_th,lab_th)\n \n return np.array(conf).reshape(-1,4)", "def discrete_potential(function, threshold):\n\n return np.where(function >= threshold, 1, 0)", "def lab_select(img, thresh=(0,255)):\n \n # 1) Convert to LAB color space\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n b = lab[:,:,2]\n\n # 2) Apply a threshold to the B channel\n binary_output = np.zeros_like(b)\n binary_output[ (b>thresh[0]) & (b<thresh[1]) ] = 1\n\n # 3) Return a binary image of threshold result\n return binary_output", "def get_dropout_mask(dropout_probability: float, h_dim: int):\n binary_mask = Variable(torch.FloatTensor(h_dim).cuda().fill_(0.0))\n binary_mask.data.copy_(torch.rand(h_dim) > dropout_probability)\n # Scale mask by 1/keep_prob to preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def convert_binary(pred):\n pred = pred.astype(np.float64)\n pred[pred <= 0.5] = 0.0\n pred[pred > 0.5] = 1.0\n return pred", "def matrice_binaire(matrice,threshold):\r\n for i in range(len(matrice)):\r\n for j in range(len(matrice[0])):\r\n if matrice[i][j]>threshold:\r\n matrice[i][j]=1\r\n else:\r\n matrice[i][j]=0\r\n \r\n return matrice", "def _threshold_mask(data, mask, rms=None, threshold=0.0):\n if rms is None or threshold <= 0.0:\n return mask.astype('bool')\n rms = np.atleast_1d(rms)\n if rms.ndim == 2:\n sigma_mask = abs(data) >= (threshold * rms)[None, :, :]\n else:\n sigma_mask = abs(data) >= threshold * rms\n return np.logical_and(mask, sigma_mask).astype('bool')", "def binaryThreshold(img, threshVal = 127, maxVal = 255, invert=True):\n\tgray = grayscale(img)\n\tif invert:\n\t\tret, thresh = cv2.threshold(img, threshVal, maxVal, cv2.THRESH_BINARY_INV)\n\telse:\n\t\tret, thresh = cv2.threshold(img, threshVal, maxVal, cv2.THRESH_BINARY)\n\treturn thresh", "def threshold_to_mask(\n img: PIL.Image.Image, threshold: float, relate: Callable[..., bool]\n) -> np.ndarray:\n img_arr = np.array(img)\n return relate(img_arr, threshold)", "def blur_mask(mask, blur_kernel, threshold=0.1):\n k = pyfits.getdata(blur_kernel)\n k = k / k.sum()\n mask = hconvolve.hconvolve(mask, k)\n mask = np.where(mask >= threshold, 1, 0).astype('int')\n 
return mask", "def SpatiallyAdaptiveBinaryThresholding(image, kernel_sigma, a, b): \n\n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma); \n \n nr, nc = image.shape;\n mask = np.zeros(image.shape);\n \n for ii in range(1, nr - 1):\n for jj in range(1, nc - 1):\n nb = image[ii - 1:ii + 2, jj - 1:jj + 2];\n nb = nb.reshape(3, 3);\n nb_std = np.std(nb);\n nb_mean = np.mean(nb);\n \n if image[ii, jj] > a * nb_std and image[ii, jj] > b * nb_mean:\n mask[ii, jj] = 1;\n\n return mask;", "def _get_support_mask(self):\n mask = np.zeros(self.scores_.shape, dtype=bool)\n mask[self.scores_ >= self.min_count] = True\n return mask", "def binarize(matrix: numpy.array, threshold: float, inplace: bool = True) -> numpy.array:\n mask = matrix >= threshold\n if inplace:\n matrix_ = matrix\n else:\n matrix_ = matrix.copy()\n matrix_[mask] = 1\n matrix_[numpy.logical_not(mask)] = 0\n return matrix_", "def apply_mask(binary, mask_dict):\n result = \"\"\n for i, val in enumerate(binary):\n if mask_dict[i] in ('X', '1'):\n result += mask_dict[i]\n else:\n result += binary[i]\n return result", "def _get_binary(raw_data):\n #try:\n # gray_data = _skimage.color.rgb2gray(raw_data)\n #except:\n # gray_data = raw_data\n\n try:\n thresh = _skimage.filters.threshold_otsu(raw_data[:, :, 0])\n binary = raw_data[:, :, 0] > thresh\n #binary = binary[:, :, 0]\n except ValueError:\n print('valueerror')\n binary = _np.ones(raw_data.shape).astype('bool')\n\n return binary", "def apply_hounsfield_thresholding(data_, threshold: tuple = (200, 600)):\n mask = np.ma.masked_inside(data_, threshold[0], threshold[1], ).mask\n thresholded = np.zeros_like(data_)\n thresholded[mask] = data_[mask]\n return thresholded", "def cluster_threshold(stat_map, domain, th, csize):\n if stat_map.shape[0] != domain.size:\n raise ValueError('incompatible dimensions')\n\n # first build a domain of supra_threshold regions\n thresholded_domain = domain.mask(stat_map > th)\n\n # get the connected components\n label = thresholded_domain.connected_components()\n\n binary = - np.ones(domain.size)\n binary[stat_map > th] = label\n nbcc = len(np.unique(label))\n for i in range(nbcc):\n if np.sum(label == i) < csize:\n binary[binary == i] = - 1\n\n binary = (binary > -1)\n return binary", "def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld white(general)\n bin_img[row, col] = 255 #0 instead of 1\n else: #less than threshold black(general)\n bin_img[row, col] = 0 #0 instead of 1\n\n\n #reverse the cases\n\n return bin_img", "def apply_threshold(heatmap, threshold):\n heatmap_thresh = np.copy(heatmap)\n ind = np.where(np.logical_and(heatmap_thresh>1, heatmap_thresh<=threshold))\n heatmap_thresh[ind] = 0\n #heatmap_thresh[(heatmap_thresh <= threshold)] = 0\n return heatmap_thresh", "def threshold(self, config ):\n java_object = pbg.gateway.jvm.boofcv.factory.filter.binary.FactoryThresholdBinary.\\\n threshold(config.java_obj,self.boof_image_type)\n return InputToBinary(java_object)", "def postprocessing(self, prediction, prob_thresh=0.5):\n prob_map = self._np_sigmoid(prediction)\n prob_map = self._np_merge_prediction(prob_map)\n if self.resize :\n prob_map = self._np_resize_image(prob_map,\n self.orig_size,\n dtype='float')\n mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)\n return mask", "def filter_labelmask(labelmask, func, above=0, 
below=1e6):\n labels = []\n for x in np.unique(labelmask):\n prop = func(labelmask, x)\n if (prop >= above and prop <= below):\n labels.append(x)\n labelmask_filtered = np.where(np.isin(labelmask, labels), labelmask, 0)\n return labelmask_filtered", "def attention_mask(x):\n mask = torch.zeros(len(x), len(x[0]))\n for i in range(len(x)):\n try:\n index = np.where(x[i]==1)[0][0]\n mask[i][index:] = -np.inf\n except:\n pass\n return mask", "def mask_and_fit(mask, binary_warped, flag):\n img = cv2.bitwise_and(binary_warped, binary_warped, mask=mask)\n x, y = extract_pixels(img)\n fit, foundFlag, confidence_index = check_and_fit(x, y, flag)\n return fit, foundFlag, confidence_index", "def threshold_mask(image, threshold):\n image = image.copy()\n if threshold == None:\n threshold = skimage.filters.threshold_isodata(image)\n image[image > threshold] = 255\n image[image <= threshold] = 0\n return image", "def make_attention_mask(source_block, target_block):\n mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)\n mask = mask.astype(np.int64)\n # (source_length, target_length)\n return mask", "def featureBits(self,features, target):\n # Select Features\n features[features == 0] = 0.1\n p2 = np.floor(np.log(features)/np.log(2))\n \n select = (p2 != 13) & (p2 != -1)\n a = np.where(select)[0]\n select[a[:1000]] = False\n return select", "def threshold(img, thresh):\n return cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]", "def lut(threshold):\n\n return [255 if i<=threshold else 0 for i in range(256)]", "def nn_threshold_predict(X, nn, theta):\n tmp = theta.reshape((1, np.size(theta))) - nn.predict(X)\n pred = np.sum(tmp < 0, axis=1).astype(np.int)\n return np.array(pred)", "def generate_mask(image, threshold):\n # TODO: Test this and optimize to only include pixels inward of the\n # horizon\n x_pix, y_pix = image.shape\n image_median = np.median(image)\n image_mean = np.mean(image)\n image_std = np.std(image)\n image_max = image.max()\n\n # generate mask\n mask = np.where(threshold < image, False, True)\n return mask", "def apply_thresholding_img(img, t1, t2):\n hist_threshold = np.where(img >= t1, img, 255)\n hist_threshold = np.where(hist_threshold < t2, 0, hist_threshold)\n return hist_threshold", "def bin_thres_img(img, ksize=3):\n # Apply each of the thresholding functions\n gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(20, 100))\n grady = abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(20, 100))\n\n mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(30, 100))\n dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0.7, 1.3))\n\n hls_binary = hls_select(img, thresh=(170, 255))\n\n combined = np.zeros_like(dir_binary)\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | hls_binary == 1] = 1\n return combined", "def threshold(X, thresh):\n Y = np.array(X)\n Y[Y >= thresh] = 1\n Y[Y < thresh] = 0\n return Y", "def get_positive_mask(labels):\n batch_shape = tf.shape(labels)[0]\n mask_1 = tf.logical_not(get_negative_mask(labels))\n mask_2 = tf.logical_not(tf.eye(batch_shape, dtype=tf.bool))\n return tf.logical_and(mask_1, mask_2)", "def threshold(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.threshold, _crank16.threshold, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)", "def binarize(self, image, threshold):\n bin_img = image.copy()\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n if image[i, j] >= 
threshold:\n bin_img[i, j] = 0\n else:\n bin_img[i, j] = 255\n return bin_img", "def bin_thresh(img, threshold):\r\n _, new_img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY) # 255 is the max value\r\n\r\n # make some smoothing to get rid of unnecessary ~splashes of color\r\n # parameters are the width and height of gaussian kernel, and standard deviation in X and Y direction,\r\n # sigmaX & sigmaY respectively. If only sigmaX is specified, sigmaY is taken as same as sigmaX.\r\n # If both are given as zeros, they are calculated from kernel size.\r\n new_img = cv2.GaussianBlur(new_img, (9, 9), 0)\r\n return new_img", "def preprocess_mask(mask):\n # Project values interval on [0.0; 1.0]\n if mask.max() > 1:\n mask[mask <= 127.5] = 0.\n mask[mask > 127.5] = 1.\n else:\n mask[mask <= .5] = 0.\n mask[mask > .5] = 1.\n return mask", "def image_binary(image_convert):\n image_bit=cv2.bitwise_not(image_convert)\n _, image_bina = cv2.threshold(image_bit, 125, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n image_bina=image_bina/255.0\n return image_bina", "def filterp(th,ProbClass1):\n y=np.zeros(ProbClass1.shape[0])\n for i,v in enumerate(ProbClass1):\n if ProbClass1[i]>th:\n y[i]=1\n return y", "def iou(binary_mask, prediction_mask, t=0.5):\n params = get_params()\n\n if params.nbr_of_classes == 1:\n predicted_class = prediction_mask >= t\n else:\n raise NotImplemented\n actual_class = binary_mask > 0.5\n\n intersection = tf.cast(tf.logical_and(predicted_class, actual_class), tf.float32)\n union = tf.cast(tf.logical_or(predicted_class, actual_class), tf.float32)\n\n return tf.divide(tf.reduce_sum(intersection), tf.reduce_sum(union))", "def anoise(this, *args, **kargs):\n\t\t\n\t\t# Arguments\n\t\tif not args: args = [50]\n\t\t\n\t\t# Kernel's retrieval\n\t\tanoisek = this._ANOISEK\n\t\tif anoisek is None: return None\n\t\t\n\t\t# More magic\n\t\tbin = this._BINARY\n\t\tfor thresh in args:\n\t\t\tbin[:,:] = (cv2.filter2D(bin, -1, anoisek) / 2.55 > thresh) * 255\n\t\treturn True", "def binary_sample(x):\n return np.random.binomial(1, p=x)", "def thresh_mask(arrayin, thresh=0.1e0):\r\n if arrayin.dtype == 'complex' :\r\n arrayout = np.abs(arrayin)\r\n else :\r\n arrayout = arrayin\r\n thresh2 = np.max(arrayout)*thresh\r\n arrayout = np.array(1.0 * (arrayout > thresh2),dtype=np.bool) \r\n return arrayout", "def binary_npv(target_tensor, forecast_probability_tensor):\n\n return 1. 
- binary_pofd(target_tensor, forecast_probability_tensor)", "def create_gt_mask(vocal_spec, bg_spec):\n vocal_spec = vocal_spec.numpy()\n bg_spec = bg_spec.numpy()\n return np.array(vocal_spec > bg_spec, dtype=np.float32)", "def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap", "def generateRandomMask(size, p=0.5):\n mask_array = (np.random.random(size) > p).astype(int)\n mask = sitk.GetImageFromArray(mask_array) \n return mask", "def _prepare_mask_file(mask):\n result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n\n if mask[i][j] > 0:\n result[i][j] = 1\n else:\n result[i][j] = 0\n \n return result", "def binarize_labels(labels):\n labels = np.where(labels == 0, labels, 1)\n\n return labels", "def binarize(array, threshold=0.5):\r\n \r\n if (np.amax(array) > 1.0) or (np.amin(array) < 0.0):\r\n raise ValueError('Voxel value fed to lambda in converting to original labels was out of range.')\r\n \r\n # obtain binarized output\r\n binarized = array.copy()\r\n zero_mask = (binarized <= threshold)\r\n binarized[zero_mask] = 0.0\r\n binarized[~zero_mask] = 1.0\r\n \r\n return binarized", "def get_mask(total, begin, end):\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def masked_bilinearsigmoid_cross_entropy(preds, labels, mask, negative_mask):\r\n\r\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)\r\n mask += negative_mask\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n # mask /= tf.reduce_mean(mask)\r\n mask = tf.reshape(mask, shape=[79924])\r\n loss *= mask\r\n return tf.reduce_mean(loss)", "def get_binary_image(grayscale_image):\n _, thresholded_image = cv2.threshold(grayscale_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return thresholded_image", "def generate_bits(n, p):\n try:\n return (np.random.rand(*n) > p).astype(int)\n except TypeError:\n return (np.random.rand(n) > p).astype(int)", "def imthresh(img):\n img_vecs = img.flatten()\n\n # pre-calculate the histogram and cumulative histogram.\n vbins = np.arange(0, 257, 1)\n img_hist, hist_edges = np.histogram(img_vecs, vbins)\n vbins = (hist_edges[:-1] + hist_edges[1:])/2\n \n hist_times_gray = np.cumsum(img_hist * np.arange(0, 256, 1))\n cum_hist = np.cumsum(img_hist)\n\n # A first approximation of the background mean mean_1 is the mean of the corner pixels.\n # The third corner's index seems to be wrong!\n m, n = img.shape\n sum_bg = np.sum(img_vecs[[0, n - 1, n * (m - 1), m * n - 1]])\n num_pix_bg = 4\n mean1 = sum_bg/4\n mean2 = (np.sum(img_vecs) - sum_bg)/(m *n - num_pix_bg)\n threshold_val = np.uint8(np.ceil((mean1 + mean2)/2))\n\n\n if (threshold_val != 0) and (cum_hist[threshold_val - 1] == 0):\n threshold_val_old = threshold_val\n\n threshold_val_old = 0 # weird\n while threshold_val != threshold_val_old:\n threshold_val_old = threshold_val\n mean1 = hist_times_gray[threshold_val - 1]/cum_hist[threshold_val - 1]\n mean2 = 
(hist_times_gray[-1] - hist_times_gray[threshold_val - 1])/(cum_hist[-1] - cum_hist[threshold_val - 1])\n\n threshold_val = np.uint8(np.ceil((mean1 + mean2)/2))\n\n\n img_out = img >= threshold_val\n return img_out, threshold_val", "def overlay_prob(image, mask, cutoff=0.5):\n if len(image.shape) == 3:\n image = image[: ,: ,0]\n if len(mask.shape) == 3:\n mask = mask[: ,: ,0]\n if np.amax(image) > 100:\n image = image /255\n\n mask = mask>=cutoff\n mask = mask.astype(int)\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def binary_converter(probs):\n return np.array([[1 - p, p] for p in probs])", "def binary_pod(target_tensor, forecast_probability_tensor):\n\n a = _get_num_true_positives(target_tensor, forecast_probability_tensor)\n c = _get_num_false_negatives(target_tensor, forecast_probability_tensor)\n\n return a / (a + c + K.epsilon())", "def test_compute_binary_probabilities(self):\n nb_images = 100\n height_map = 32\n width_map = 48\n bin_widths_test_0 = numpy.array([2., 2., 2.], dtype=numpy.float32)\n bin_widths_test_1 = numpy.array([0.5, 0.5, 0.5], dtype=numpy.float32)\n truncated_unary_length = 4\n \n y_float32_0 = numpy.random.uniform(low=-10.,\n high=10.,\n size=(nb_images, height_map, width_map, 1)).astype(numpy.float32)\n y_float32_1 = numpy.random.laplace(loc=0.5,\n scale=2.5,\n size=(nb_images, height_map, width_map, 1)).astype(numpy.float32)\n y_float32_2 = numpy.random.standard_cauchy(size=(nb_images, height_map, width_map, 1)).astype(numpy.float32)\n y_float32 = numpy.concatenate((y_float32_0, y_float32_1, y_float32_2),\n axis=3)\n map_mean = numpy.array([0., 0.5, 0.], dtype=numpy.float32)\n binary_probabilities_0 = lossless.stats.compute_binary_probabilities(y_float32,\n bin_widths_test_0,\n map_mean,\n truncated_unary_length)\n print('1st set of test quantization bin widths:')\n print(bin_widths_test_0)\n print('1st absolute centered-quantized latent variable feature map.')\n print('Binary probabilities computed by the function:')\n print(binary_probabilities_0[0, :])\n \n # Let x be a continuous random variable following the\n # uniform distribution of support [-10.0, 10.0]. The\n # probability the 1st binary decision is 0 is written\n # p(|x| <= 1.0) = 1.0/10. The probability the 2nd\n # binary decision is 0 is written\n # p(1.0 <= |x| <= 3.0)/p(|x| >= 1.0) = (2.0/10)/(9.0/10) = 2.0/9.\n # The probability the 3rd binary decision is 0 is written\n # p(3.0 <= |x| <= 5.0)/p(|x| >= 3.0) = (2.0/10)/(7.0/10) = 2.0/7.\n # The above calculations use the cumulative distribution\n # function of the uniform distribution of support [-10.0, 10.0].\n print('Binary probabilities computed by hand:')\n print([1./10, 2./9, 2./7, 2./5])\n print('2nd absolute centered-quantized latent variable feature map.')\n print('Binary probabilities computed by the function:')\n print(binary_probabilities_0[1, :])\n \n # Let x be a continuous random variable following the\n # Laplace distribution of mean 0.0 and scale 2.5. It is\n # said `mean 0.0` as the 2nd latent variable feature map\n # is centered before being quantized. The probability\n # the 1st binary decision is 0 is written\n # p(|x| <= 1.0) = 0.3297. 
The probability the 2nd binary\n # decision is 0 is written\n # p(1.0 <= |x| <= 3.0)/p(|x| >= 1.0) = 0.3691/0.6703 = 0.5507.\n # The probability the 3rd binary decision is 0 is written\n # p(3.0 <= |x| <= 5.0)/p(|x| >= 3.0) = 0.1659/0.3012 = 0.5507.\n # The above calculations use the cumulative distribution\n # function of the Laplace distribution of mean 0 and scale 2.5.\n print('Binary probabilities computed by hand:')\n print([0.3297, 0.5507, 0.5507, 0.5507])\n print('3rd absolute centered-quantized latent variable feature map.')\n print('Binary probabilities computed by the function:')\n print(binary_probabilities_0[2, :])\n \n # Let x be a continuous random variable following the\n # standard Cauchy distribution. The probability the 1st\n # binary decision is 0 is written p(|x| <= 1.0) = 0.5.\n # The probability the 2nd binary decision is 0 is written\n # p(1.0 <= |x| <= 3.0)/p(|x| >= 1.0) = 0.2952/0.5 = 0.5903.\n # The probability the 3rd binary decision is 0 is written\n # p(3.0 <= |x| <= 5.0)/p(|x| >= 3.0) = 0.079/0.2048 = 0.3865.\n # The above calculations use the cumulative distribution\n # function of the standard Cauchy distribution.\n print('Binary probabilities computed by hand:')\n print([0.5, 0.5903, 0.3865, 0.2811])\n \n binary_probabilities_1 = lossless.stats.compute_binary_probabilities(y_float32,\n bin_widths_test_1,\n map_mean,\n truncated_unary_length)\n print('\\n2nd set of test quantization bin widths:')\n print(bin_widths_test_1)\n print('1st absolute centered-quantized latent variable feature map.')\n print('Binary probabilities computed by the function:')\n print(binary_probabilities_1[0, :])\n \n # Let x be a continuous random variable following the\n # uniform distribution of support [-10.0, 10.0]. The\n # probability the 1st binary decision is 0 is written\n # p(|x| <= 0.25) = 1.0/40. The probability the 2nd\n # binary decision is 0 is written\n # p(0.25 <= |x| <= 0.75)/p(|x| >= 0.25) = (2.0/40)/(39.0/40) = 2.0/39.\n # The probability the 3rd binary decision is 0 is written\n # p(0.75 <= |x| <= 1.25)/p(|x| >= 0.75) = (2.0/40)/(37.0/40) = 2.0/37.\n print('Binary probabilities computed by hand:')\n print([1./40, 2./39, 2./37, 2./35])\n print('2nd absolute centered-quantized latent variable feature map.')\n print('Binary probabilities computed by the function:')\n print(binary_probabilities_1[1, :])\n \n # Let x be a continuous random variable following the\n # Laplace distribution of mean 0.0 and scale 2.5. The\n # probability the 1st binary decision is 0 is written\n # p(|x| <= 0.25) = 0.0952. The probability the 2nd binary\n # decision is 0 is written\n # p(0.25 <= |x| <= 0.75)/p(|x| >= 0.25) = 0.1640/0.9048 = 0.1813.\n # The probability the 3rd binary decision is 0 is written\n # p(0.75 <= |x| <= 1.25)/p(|x| >= 0.75) = 0.1343/0.7408 = 0.1813.\n print('Binary probabilities computed by hand:')\n print([0.0952, 0.1813, 0.1813, 0.1813])\n print('3rd absolute centered-quantized latent variable feature map.')\n print('Binary probabilities computed by the function:')\n print(binary_probabilities_1[2, :])\n \n # Let x be a continuous random variable following the\n # standard Cauchy distribution. 
The probability the 1st\n # binary decision is 0 is written p(|x| <= 0.25) = 0.1560.\n # The probability the 2nd binary decision is 0 is written\n # p(0.25 <= |x| <= 0.75)/p(|x| >= 0.25) = 0.2537/0.8440 = 0.3006.\n # The probability the 3rd binary decision is 0 is written\n # p(0.75 <= |x| <= 1.25)/p(|x| >= 0.75) = 0.1608/0.5903 = 0.2724.\n print('Binary probabilities computed by hand:')\n print([0.1560, 0.3006, 0.2724, 0.2306])", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n thresh_heatmap = np.copy(heatmap)\n thresh_heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return thresh_heatmap", "def autothreshold(gray_im, method=\"otsu\"):\n if method == \"otsu\":\n t = otsu(gray_im)\n elif method == \"kmeans\":\n t = ave(kmeans(list(gray_im.getdata())))\n return gray_im.point(lambda x: 0 if x < t else 255) # < or <= ?", "def image_thresholding(image: np.ndarray):\n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n # Find threshold using Otsu filter\n threshold: float = filters.threshold_otsu(image)\n binary = image > threshold\n\n binary_image = np.where(image, binary, 0) * 255\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)" ]
[ "0.77609915", "0.732614", "0.7293136", "0.6731647", "0.6590931", "0.65737176", "0.65595347", "0.6532869", "0.65259445", "0.6520092", "0.6501832", "0.64822376", "0.6414383", "0.6395488", "0.6340759", "0.63036704", "0.6302406", "0.62916476", "0.6282469", "0.62416035", "0.62416035", "0.62394196", "0.62261724", "0.621819", "0.6189172", "0.61887926", "0.61840725", "0.6176497", "0.6151448", "0.6132452", "0.61303127", "0.61166734", "0.6113234", "0.6085611", "0.608456", "0.6075185", "0.6065627", "0.60578257", "0.60476613", "0.60422873", "0.6041469", "0.60316783", "0.6030463", "0.6017128", "0.60132223", "0.60043895", "0.5996108", "0.5983314", "0.5982935", "0.59724355", "0.5970248", "0.59654844", "0.5963989", "0.59458274", "0.5938908", "0.5918479", "0.59111804", "0.5910896", "0.58938736", "0.58878475", "0.58855593", "0.58799344", "0.58723867", "0.587163", "0.58556527", "0.58482844", "0.58470124", "0.58435374", "0.58369935", "0.5836475", "0.5824005", "0.58176756", "0.58162737", "0.5811685", "0.5801695", "0.5798011", "0.57970536", "0.57955", "0.5793806", "0.5791685", "0.5788129", "0.57865185", "0.5785941", "0.5774406", "0.5768338", "0.5764893", "0.57640266", "0.57640266", "0.57640266", "0.5759499", "0.5759417", "0.575472", "0.57541406", "0.5752565", "0.57511437", "0.5748738", "0.5746724", "0.5746607", "0.5744518", "0.5740123" ]
0.70543975
3
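The negative snippets collected in the row above all revolve around the same operation: turning a grayscale image or probability map into a binary mask by comparing values against a threshold. For orientation, here is a minimal plain-NumPy sketch of that fixed-threshold idea; the function name and the example threshold of 128 are illustrative and not taken from the dataset.

import numpy as np

def binarize(image, threshold=128):
    # Pixels strictly above the threshold become 255, everything else becomes 0.
    return np.where(image > threshold, 255, 0).astype(np.uint8)

# Tiny usage example on a synthetic 2x3 grayscale patch.
patch = np.array([[10, 200, 128],
                  [255, 0, 129]], dtype=np.uint8)
print(binarize(patch))   # [[  0 255   0]
                         #  [255   0 255]]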
Method to compute the coefficients of the straight line between two points
def compute_line_coefs(point_a, point_b): b_coef = -1 if (point_b[0] - point_a[0]) == 0: a_coef = 0 else: a_coef = (point_b[1] - point_a[1]) / (point_b[0] - point_a[0]) c_coef = point_b[1] - a_coef*point_b[0] return np.array([a_coef, b_coef, c_coef])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_linear_coef(p1, p2):\n slope = (p1[1] - p2[1]) / (p1[0] - p2[0])\n intercept = p1[1] - slope * p1[0]\n return slope, intercept", "def generate_line(point_1, point_2):\r\n A = point_1.y - point_2.y\r\n B = point_2.x - point_1.x\r\n C = point_1.y * B + point_1.x * A\r\n return np.matrix([[A],[B],[-C]])", "def line_equation(x1, y1, x2, y2):\n \n a = y2 - y1\n b = x1 - x2\n c = x2*y1 - x1*y2\n return a, b, c", "def line(p1, p2):\n A = (p1[1] - p2[1])\n B = (p2[0] - p1[0])\n C = (p1[0]*p2[1] - p2[0]*p1[1])\n return A, B, -C", "def spline_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tD = d_coefficients(x1,x2,x3,C)\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,C,D)\n\tA = a_coefficients(y1,y2)\n\treturn(A,B,C[:2],D)", "def straight_line(p1,p2,spec):\n\tx1,y1 = p1; x2,y2 = p2\n\t\n\tm = (y2-y1) / (x2-x1)\n\tx = np.arange(x1-0.01, x2+0.01, 0.01) \n\n\ty = m *(x-x1) + y1\n\tf = np.vectorize(interp1d(x,y,kind = 'linear'))\n\twave,flux,error = Select_Data(spec,[x1,x2])\n\treturn wave,f(wave)", "def homo_line(a, b):\n return (a[1] - b[1], b[0] - a[0], a[0] * b[1] - a[1] * b[0])", "def a_coefficients(y1,y2):\n\tACoefficients = np.array([\ty1, \\\n\t\t\t\t\t\t\t\ty2 ]).astype(float)\n\treturn(ACoefficients)", "def compute_dual_line(P):\n return Line(P.x, -P.y)", "def line_equation_ap(angle, (x1, y1)):\n \n # get second point on the line\n x2 = float(x1) + cos(angle)\n y2 = float(y1) + sin(angle)\n \n # return A, B and C coefficients\n return (y1 - y2, x2 - x1, x1*y2 - x2*y1)", "def TwoPoints(self, p1, p2):\n\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n\n return Line2(np.cross(p1, p2))", "def calculate_coefficients(self, start, end):\n A = np.array([\n [self.deltaT**3, self.deltaT**4, self.deltaT**5],\n [3 * self.deltaT**2, 4 * self.deltaT**3, 5 * self.deltaT**4],\n [6 * self.deltaT, 12 * self.deltaT**2, 20 * self.deltaT**3],\n ])\n\n a_0, a_1, a_2 = start[0], start[1], start[2] / 2.0\n c_0 = a_0 + a_1 * self.deltaT + a_2 * self.deltaT**2\n c_1 = a_1 + 2 * a_2 * self.deltaT\n c_2 = 2 * a_2\n\n B = np.array([\n end[0] - c_0,\n end[1] - c_1,\n end[2] - c_2\n ])\n\n a_3_4_5 = np.linalg.solve(A, B)\n coeff = np.concatenate((np.array([a_0, a_1, a_2]), a_3_4_5))\n\n return coeff", "def coefA(x0,y0,x1,y1):\n return -(y1-y0)/(x1-x0)", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def fit_line(xs,ys,a,b):\n # Checking against empty list, if empty return 0s\n if not (xs):\n return 0,0,0,0\n \n # Preparing vectors for least square\n z = np.vstack([xs, np.ones(len(xs))]).T\n s = np.array(ys)\n\n # Applying least square fitting on points\n m, c = np.linalg.lstsq(z, np.array(ys))[0] #Applying least squares method\n \n #Using slope and intercept plus y coordinates to get x-coordinates\n x1 = int(a/m - c/m) \n x2 = int(b/m - c/m)\n \n return x1,a,x2,b", "def coefC(x0,y0,x1,y1):\n return 
(x1*y0-x0*y1)/(x1-x0)", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def find_line_model(points):\n\n # [WARNING] vertical and horizontal lines should be treated differently\n # here we just add some noise to avoid division by zero\n\n # find a line model for these points\n m = (points[1, 1] - points[0, 1]) / (\n points[1, 0] - points[0, 0] + sys.float_info.epsilon) # slope (gradient) of the line\n c = points[1, 1] - m * points[1, 0] # y-intercept of the line\n\n return m, c", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. \n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def line_contribution(p1,p2,alpha = 1):\n\n adjust = np.zeros((worksize,worksize,2))\n\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n\n coordinates = coordinate_matrix(worksize)\n numerator = np.sum(np.multiply(coordinates,np.reshape(np.array(((y2-y1,-(x2-x1)))),(2,1,1))),axis = 0) + x2*y1 - y2*x1\n dist_from_line = np.abs(numerator) * (1.0/np.sqrt((y2-y1)**2+(x2-x1)**2))\n xcontribution = (x2-x1)*(1/(alpha*dist_from_line+1))\n ycontribution = (y2-y1)*(1/(alpha*dist_from_line+1))\n\n\n return np.array((-ycontribution,xcontribution))/np.sqrt((y2-y1)**2+(x2-x1)**2)", "def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx", "def slope(x1, y1, x2, y2):\r\n delta_y = y2-y1\r\n delta_x = x2-x1\r\n return delta_y / delta_x", "def line(intercept, slope, x):\n return slope*x + intercept", "def line(x1, y1, x2, y2):\r\n\r\n x1 = normalize(x1)\r\n y1 = normalize(y1)\r\n x2 = normalize(x2)\r\n y2 = normalize(y2)\r\n\r\n xdiff = max(x1, x2) - min(x1, x2)\r\n ydiff = max(y1, y2) - min(y1, y2)\r\n xdir = 1 if x1 <= x2 else -1\r\n ydir = 1 if y1 <= y2 else -1\r\n\r\n r = max(xdiff, ydiff)\r\n\r\n for i in range(r+1):\r\n x = x1\r\n y = y1\r\n\r\n if ydiff:\r\n y += (float(i) * ydiff) / r * ydir\r\n if xdiff:\r\n x += (float(i) * xdiff) / r * xdir\r\n\r\n yield (x, y)", "def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)", "def abline(points, slope, intercept):\n x_values = 
get_column(points, 0)\n return [slope * i + intercept for i in x_values]", "def drawLine2P(x,y,xlims):\n \n xrange = np.arange(xlims[0],xlims[1],1)\n A = np.vstack([x, np.ones(len(x))]).T\n k, b = np.linalg.lstsq(A, y, rcond=None)[0]\n return [xrange, k*xrange + b]", "def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance", "def intercept(x1, y1, x2, y2):\r\n m = slope(x1, y1, x2, y2)\r\n return y1 - m*x1", "def point_line_dist2(p, l1, l2):\n p, l1, l2 = np.asarray(p), np.asarray(l1), np.asarray(l2)\n ap = l1 - p\n n = l2 - l1\n n /= np.sqrt(sum(n**2))\n dist = ap - np.outer(n, np.dot(ap, n)).T\n return np.sum(dist**2, 1)", "def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is {}\".format(\n # x1, y1, a, b, c, d))\n return(d)", "def main(self,xpoints,ypoints,numOfWaypoints):\n WaypointsX = np.zeros(((numOfWaypoints)*(len(xpoints)-1)))\n WaypointsY = np.zeros(((numOfWaypoints)*(len(xpoints)-1)))\n\n for i in range(len(xpoints)-1):\n x = np.linspace(xpoints[i],xpoints[i+1],numOfWaypoints)\n try:\n slope = ((ypoints[i+1])-(ypoints[i]))/((xpoints[i+1])-(xpoints[i]))\n coefficient = (ypoints[i])-(slope)*(xpoints[i])\n y = (slope)*x+coefficient\n except ZeroDivisionError:\n x = np.linspace(xpoints[i],xpoints[i],numOfWaypoints)\n y = np.linspace(ypoints[i],ypoints[i+1],numOfWaypoints)\n WaypointsX[((numOfWaypoints)*(i)):((numOfWaypoints)*(i+1))]=x\n WaypointsY[((numOfWaypoints)*(i)):((numOfWaypoints)*(i+1))]=y\n\n return WaypointsX, WaypointsY", "def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")", "def line_param(point_a, point_b, t):\n new_point = point_a - point_b\n return point_b + t*new_point", "def crossLine(self, other):\n a, b = self.point\n c, d = other.point\n m, n = self.vector\n o, p = other.vector\n if n * o == m * p: # The lines are parallels\n return None\n elif self.angle == -math.pi / 2:\n return Point(a, d)\n elif other.angle == -math.pi / 2:\n return Point(b, c)\n else:\n x = (a * n * o - b * m * o - c * m * p + d * m * o) / (n * o - m * p)\n y = (x - a) * n / m + b\n return Point(x, y)", "def line_intersection_with(self, other):\n # solve following system :\n # intersection = start of self + alpha * direction of self\n # intersection = start of other + beta * direction of other\n directions = [s.endpoints[1] - s.endpoints[0] for s in (self, other)]\n denominator = directions[0].cross_product(directions[1])\n if abs(denominator) < 0.000001:\n # almost parallel lines\n return\n start_diff = other.endpoints[0] - self.endpoints[0]\n alpha = start_diff.cross_product(directions[1]) / denominator\n return self.endpoints[0] + directions[0] * alpha", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def slip_to_coefficients(x, y, a):\n partials = np.zeros((x.size, 3))\n partials[:, 0] = (x / a) * (9 * (x / a) / 8 - 3 / 4)\n partials[:, 1] = (1 - 3 * (x / a) / 2) * (1 + 3 * (x / a) / 2)\n partials[:, 2] = (x / a) * (9 * (x / a) / 8 + 3 / 4)\n coefficients = np.linalg.inv(partials) @ y\n return coefficients", "def intersection(line1, line2):\n p0, 
p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def _homogenous_line(A,B):\n if A==B: raise ValueError('Degenerate line through %s and %s' % (repr(A),repr(B)))\n\n Ax,Ay=A\n Bx,By=B\n # keep magnitude of coefficients as close to unity as possible\n if abs(Bx-Ax)>=abs(By-Ay):\n a,b=float(By-Ay)/float(Bx-Ax), -1\n c=Ay-a*Ax\n else:\n a,b=-1, float(Bx-Ax)/float(By-Ay)\n c=Ax-b*Ay\n\n return a,b,c", "def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def line_fit(x,y):\n\t# clean\n\tx = np.squeeze(x)\n\ty = np.squeeze(y)\n\t# concatenate\n\txy = np.concatenate((x[:,np.newaxis],y[:,np.newaxis]),1)\n\t# sort by x values\n\txy = xy[xy[:,0].argsort()]\n\t#print(xy)\n\tf = lambda x,m,b : m*x+b\n\tpars,_ = opt.curve_fit(f,xy[:,0],xy[:,1])\n\tm = pars[0]\n\tb = pars[1]\n\tpts = np.zeros((2,2))\n\tpts[0,0] = xy[0,0]\n\tpts[1,0] = xy[-1,0]\n\tpts[:,1] = pts[:,0]*m+b\n\tsig = np.std((xy[:,1]-f(xy[:,0],m,b)))\n\treturn pts, sig", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def get_points_for_thick_line(start_x: float, start_y: float,\r\n end_x: float, end_y: float,\r\n line_width: float):\r\n vector_x = start_x - end_x\r\n vector_y = start_y - end_y\r\n perpendicular_x = vector_y\r\n perpendicular_y = -vector_x\r\n length = math.sqrt(vector_x * vector_x + vector_y * vector_y)\r\n if length == 0:\r\n normal_x = 1.0\r\n normal_y = 1.0\r\n else:\r\n normal_x = perpendicular_x / length\r\n normal_y = perpendicular_y / length\r\n r1_x = start_x + normal_x * line_width / 2\r\n r1_y = start_y + normal_y * line_width / 2\r\n r2_x = start_x - normal_x * line_width / 2\r\n r2_y = start_y - normal_y * line_width / 2\r\n r3_x = end_x + normal_x * line_width / 2\r\n r3_y = end_y + normal_y * line_width / 2\r\n r4_x = end_x - normal_x * line_width / 2\r\n r4_y = end_y - normal_y * line_width / 2\r\n points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)\r\n return points", "def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 
#Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])", "def _line_from_two_points(pt1: np.array, pt2: np.array) -> np.array:\n numLine = pt1.shape[0]\n lines = np.zeros((numLine, 6))\n n = np.cross(pt1, pt2)\n n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)\n lines[:, 0:3] = n\n\n areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))\n areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))\n areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))\n planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1\n lines[:, 3] = planeIDs\n\n for i in range(numLine):\n uv = _xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])\n umax = uv[:, 0].max() + np.pi\n umin = uv[:, 0].min() + np.pi\n if umax - umin > np.pi:\n lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi\n else:\n lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi\n\n return lines", "def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)", "def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))", "def calculate_line_length(x1, y1, x2, y2):\n distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n return distance", "def distance_to_line(a, b, p):\n return distance(closest_point(a, b, p), p)", "def line_param(v1,v2):\n if (v1[0]-v2[0] != 0.):\n m = (v1[1] - v2[1])/(v1[0] - v2[0])\n b = -m*v1[0] + v1[1]\n if num.fabs(m)>1.0e6:\n m = None\n b = v1[0]\n else: \n m = None\n b = v1[0]\n return (m,b)", "def line_ccw(a, b, p):\n return (p[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (p[0] - a[0])", "def distanceOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return 0\n res = res[None, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)[0]", "def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])", "def my_linear_polyfit(raw_lines):\n \n x = []\n y = []\n weight = []\n \n # Build arrays of all x, y, and weight points\n for line in raw_lines:\n for x1, y1, x2, y2 in line:\n x.extend([x1, x2])\n y.extend([y1, y2])\n line_length = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n weight.extend([line_length, line_length])\n \n # Apply weighted linear polyfit\n z = np.polyfit(x, y, 1, w=weight)\n \n # Output fit line, z = [m, b]\n return z", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = 
linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def slope(a, b):\n a1, a2 = PC_matrix[:, 0][a], PC_matrix[:, 1][a]\n b1, b2 = PC_matrix[:, 0][b], PC_matrix[:, 1][b]\n \n return b1-a1, b2-a2", "def line(x0: float, y0: float, x1: float, y1: float) -> LineCollection:\n return LineCollection([(complex(x0, y0), complex(x1, y1))])", "def calc_slope(self, left, right):\n return (left[1] - right[1]) / (left[0] - right[0])", "def Solver(line1, line2):\n\ta = np.array(line1[0])\n\tb = np.array(line1[1])\n\tu = np.array(line2[0])\n\tv = np.array(line2[1])\n\t#print(a,b,u,v)\n\tc = u[:2]-a[:2]\n\tA = np.vstack((b[:2],-v[:2])).T\n\t#print(A)\n\tx = np.linalg.solve(A,c)\n\t#print(x)\n\tp = a+x[0]*b\n\t#print(p)\n\treturn p", "def linearfit(x,y):\n fit = np.polyfit(x,y,1)\n fit_fn = np.poly1d(fit)\n yy = fit_fn(x) \n \n return yy", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def linear_attenuation_coefficient(self, lines):\n wl = lines.to(\"nm\", \"spectroscopy\").magnitude\n if isarray(wl):\n return [self.getExtinctionCoefficient(l) for l in wl]\n else:\n return self.getExtinctionCoefficient(wl)", "def linear(x2, y2, N=100):\n\n m = y2 / x2\n x = np.linspace(0, x2, N)\n y = m*x\n\n # The time of travel\n T = np.sqrt(2*(1+m**2)/g/m * x2)\n print('T(linear) = {:.3f}'.format(T))\n return x, y, T", "def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def find_slopes(x, y):\n slopes = np.zeros((len(x) - 1))\n for i in range(len(x) - 1):\n # m = (y2 - y1) / (x2 - x1)\n delta_x = x[i + 1] - x[i]\n delta_y = y[i + 1] - y[i]\n slopes[i] = delta_y / delta_x\n return slopes", "def coefficients(self) :\n raise NotImplementedError", "def distance_line_line(l1, l2, tol=0.0):\n a, b = l1\n c, d = l2\n ab = subtract_vectors(b, a)\n cd = subtract_vectors(d, c)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, cd)\n l = length_vector(n)\n if l <= tol:\n return distance_point_point(closest_point_on_line(l1[0], l2), l1[0])\n n = scale_vector(n, 1.0 / l)\n return fabs(dot_vectors(n, ac))", "def b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients):\n\tBCoefficients = np.array([\t((y2-y1)/(x2-x1)-CCoefficients[0]*(x2-x1) - DCoefficients[0]*((x2-x1)**2)), \\\n\t\t\t\t\t\t\t\t((y3-y2)/(x3-x2)-CCoefficients[1]*(x3-x2) - DCoefficients[1]*((x3-x2)**2)) \t]).astype(float)\n\treturn(BCoefficients)", "def sign_line(pt, P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n x, y = pt\n\n return np.sign((x - x1)*(y2 - y1) - (y-y1)*(x2-x1))", "def _distance2_line_endpoints(line1, line2):\n (A,B),(C,D) = line1, line2\n R2=lambda u,v: (u[0]-v[0])**2+(u[1]-v[1])**2\n pairs = zip((A,A,B,B),(C,D,C,D))\n r2 = [R2(pair[0],pair[1]) for pair in pairs]\n mini=sorted(zip(r2,pairs),key=lambda a,b: a)[0]\n #R2_min = min((R2(A,C), R2(A,D), R2(B,C), R2(B,D)))\n 
return mini[0], mini[1][0], mini[1][1]", "def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. 
That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b", "def get_linear_distance(p1,p2):\r\n if p1[0] < p2[0]:\r\n d = 0\r\n else:\r\n d = np.sqrt((p1[0]-p2[0])**2.0 + (p1[1]-p2[1])**2.0)\r\n return d", "def _distance2_point_to_h_line(point, h_line):\n a,b,c = h_line\n x0,y0 = point\n # solve for equality\n # r^2 = (x-x0)^2 + (y-y0)^2\n # ax + by + c = 0\n # --> 2nd order polynomial\n # --> find place of exactly one solution, i.e.\n # radicant of p-q formula is identical zero\n # if radicant is zero, then\n ys = ((a*x0-c)*b + a**2*y0)/(a**2+b**2)\n # or\n xs = ((b*y0-c)*a + b**2*x0)/(a**2+b**2)\n # for a != 0\n if abs(a)>=abs(b):\n R2 = (x0-c/a)**2+y0**2 - (1.+(b/a)**2)*ys**2\n else:\n R2 = (y0-c/b)**2+x0**2 - (1.+(a/b)**2)*xs**2\n R2 = R2 if abs(R2)>1e-13 else 0.\n return R2, (xs, ys)", "def line_points(a=0, b=0, c=0, ref=[-1.0, 1.0]):\n\n if (a == 0) and (b == 0):\n raise Exception(\"linePoints: a and b cannot both be zero\")\n\n return [(-c / a, p) if b == 0 else (p, (-c - a * p) / b) for p in ref]", "def distance_point_line_3d(point: Vector, start: Vector, end: Vector) -> float:\n if start.isclose(end):\n raise ZeroDivisionError('Not a line.')\n v1 = point - start\n # point projected onto line start to end:\n v2 = (end - start).project(v1)\n # Pythagoras:\n return math.sqrt(v1.magnitude_square - v2.magnitude_square)", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def linear(m, b, x, xx):\n y = m*(x - xx) + b\n return y", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = (y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def _get_slope(x, y):\n slope = linregress(x, y)\n return slope", "def line_line_intersection(a1: Vector3, a2: Vector3, b1: Vector3, b2: Vector3) -> Vector3:\n # From https://stackoverflow.com/a/20677983/7245441\n\n def det(a: Vector3, b: Vector3) -> float:\n return a.x * b.y - a.y * b.x\n\n y_diff = Vector3(a1.y - a2.y, b1.y - b2.y, 0)\n x_diff = Vector3(a1.x - a2.x, b1.x - b2.x, 0)\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise Exception(\"Lines do not intersect\")\n\n d = Vector3(det(a1, a2), det(b1, b2), 0)\n x = det(d, x_diff) / div\n y = det(d, y_diff) / div\n\n return Vector3(x, y, 0)", "def line_points(start, end):\n # Setup initial conditions\n x1, y1 = start.astuple()\n x2, y2 = end.astuple()\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = 
x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = Int2(y, x) if is_steep else Int2(x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points", "def getLine(self, **kwargs):\n return Line(self.p1, self.angle, **kwargs)", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def llincc(x,y):\r\n covar = lcov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n\r\n xvar = lvar(x)*(len(x)-1)/float(len(x)) # correct denom to n\r\n yvar = lvar(y)*(len(y)-1)/float(len(y)) # correct denom to n\r\n lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))\r\n return lincc", "def straight_line(numbers, p_current, relative = False):\n if len(numbers) != 2:\n return None\n\n p_next = Point(numbers[0], numbers[1])\n if relative: #relative\n p_next += p_current\n\n return Line(p_current, p_next)", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def line(self, P, Q, R):\n if not R or R == self.infpoint:\n raise ValueError(\"R must not be zero\")\n if P == Q == self.infpoint:\n return self.basefield.one\n if P == self.infpoint:\n return R[0] - Q[0]\n if Q == self.infpoint:\n return R[0] - P[0]\n if P[0] != Q[0]:\n return (Q[0] - P[0]) * R[1] - (Q[1] - P[1]) * \\\n R[0]- Q[0] * P[1] + P[0] * Q[1]\n if P[1] != Q[1]:\n return R[0] - P[0]\n return (3 * P[0] ** 2 + 2 * self.a2 * P[0] + self.a4 - self.a1 * P[1] ) * \\\n R[0] - (2 * P[1] + self.a1 * P[0] + self.a3 ) * R[1] - \\\n (P[0] ** 3) + self.a4 * P[0] + 2 * self.a6 - self.a3 * P[1]", "def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = 
other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def abline(slope, intercept, a, b):\n # axes = plt.gca()\n print(slope)\n print(intercept)\n x_vals = np.array(list_xs[ a: b])\n y_vals = intercept + slope * (x_vals-a)\n plt.plot(x_vals, y_vals, '--')\n # print(x_vals)", "def on_line_2d(p1, p2, pt, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmOnLineWithTol(p1, p2, pt, tol)" ]
[ "0.7252494", "0.7058825", "0.70182276", "0.69539297", "0.68374145", "0.6692208", "0.66759145", "0.66448915", "0.6642555", "0.6631278", "0.660639", "0.65882164", "0.6564653", "0.65506494", "0.64859647", "0.64820635", "0.643047", "0.6408767", "0.63719994", "0.6311349", "0.62959486", "0.62786496", "0.6272593", "0.6259747", "0.6254772", "0.6251964", "0.62221694", "0.6212505", "0.6196671", "0.61846244", "0.61741275", "0.613847", "0.6112985", "0.6093683", "0.6090798", "0.6088082", "0.6075845", "0.60719544", "0.6064676", "0.6061504", "0.6029562", "0.60075635", "0.59780806", "0.5975386", "0.5968947", "0.5963056", "0.59595984", "0.595077", "0.5949105", "0.5943804", "0.5939806", "0.5937459", "0.59360856", "0.5936001", "0.59335345", "0.59221846", "0.58841527", "0.5880779", "0.58798754", "0.5871863", "0.5868773", "0.5863577", "0.58626366", "0.5850981", "0.58499384", "0.58483005", "0.5838161", "0.583388", "0.5832863", "0.58268034", "0.5826113", "0.58226264", "0.5813931", "0.579881", "0.57923293", "0.5788732", "0.57738554", "0.57703716", "0.5759548", "0.57581836", "0.5756676", "0.57488143", "0.57455015", "0.57419693", "0.5740083", "0.5732937", "0.5724718", "0.571886", "0.57182145", "0.57143015", "0.5714102", "0.5713617", "0.56971574", "0.5692494", "0.5687628", "0.56843597", "0.56822157", "0.568027", "0.56738174", "0.56648535" ]
0.80344635
0
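The row above pairs the query about straight-line coefficients with compute_line_coefs. A short usage sketch follows; the function is repeated here (with the numpy import its module is assumed to provide) so the snippet runs standalone, and the sample points are illustrative. It returns [a, b, c] with b fixed to -1, so the line reads a*x - y + c = 0, i.e. y = a*x + c; a vertical segment falls back to a = 0 rather than raising.

import numpy as np

def compute_line_coefs(point_a, point_b):
    b_coef = -1
    if (point_b[0] - point_a[0]) == 0:
        a_coef = 0  # degenerate fallback: a true vertical line is not representable with b = -1
    else:
        a_coef = (point_b[1] - point_a[1]) / (point_b[0] - point_a[0])
    c_coef = point_b[1] - a_coef * point_b[0]
    return np.array([a_coef, b_coef, c_coef])

a, b, c = compute_line_coefs((0.0, 1.0), (2.0, 5.0))
print(a, b, c)                 # 2.0 -1.0 1.0, i.e. the line y = 2x + 1
print(a * 2.0 + b * 5.0 + c)   # 0.0, so the input point (2, 5) satisfies a*x + b*y + c = 0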
Method to compute the orthogonal projection of a point onto a straight line. Let (D) be the straight line whose coefficients are given as parameters
def compute_orthogonal_proj(line_coefs, point_coordinates): a = line_coefs[0] c = line_coefs[2] # Compute c_prime by replacing with the coordinates of the point c_prime = a*point_coordinates[1] + point_coordinates[0] x_proj = (c_prime-a*c)/((a**2)+1) y_proj = (a*c_prime+c)/((a**2)+1) return np.array([x_proj, y_proj])
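A usage sketch for compute_orthogonal_proj above: the function is reproduced so the snippet runs standalone (its module is assumed to import numpy as np), and the sample line and point are illustrative. With the [a, b, c] convention of the previous row (b = -1, line y = a*x + c), c_prime describes the perpendicular through the point, and the two quotients give the intersection of that perpendicular with (D).

import numpy as np

def compute_orthogonal_proj(line_coefs, point_coordinates):
    a = line_coefs[0]
    c = line_coefs[2]
    # Perpendicular through the point: x + a*y = c_prime.
    c_prime = a * point_coordinates[1] + point_coordinates[0]
    x_proj = (c_prime - a * c) / (a**2 + 1)
    y_proj = (a * c_prime + c) / (a**2 + 1)
    return np.array([x_proj, y_proj])

line = np.array([2.0, -1.0, 1.0])                 # y = 2x + 1
proj = compute_orthogonal_proj(line, (2.0, 0.0))
print(proj)                                       # [0. 1.], a point on y = 2x + 1
print(np.dot(proj - (2.0, 0.0), (1.0, 2.0)))      # 0.0: the offset is orthogonal to the direction (1, 2)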
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_projection_of_pt_on_line(point, line_point1, line_point2):\n projection = Point(-1, -1)\n projection.x = point.x\n if (line_point2.x - line_point1.x) != 0:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / \\\n (line_point2.x - line_point1.x) + line_point1.y\n else:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / 1 + line_point1.y\n return projection", "def projectPoint(self, point):\n vector = self.normal_vector\n angle = vector.angle\n line = Line(point, angle, correct=False)\n projection = self.crossLine(line)\n return projection", "def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def project_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n\n return add_vectors(a, c)", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def compute_dual_line(P):\n return Line(P.x, -P.y)", "def project_line(self, line: Line, **kwargs: float) -> Line:\n if self.normal.is_parallel(line.vector, **kwargs):\n raise ValueError(\"The line and plane must not be perpendicular.\")\n\n point_projected = self.project_point(line.point)\n\n if self.normal.is_perpendicular(line.vector, **kwargs):\n return Line(point_projected, line.vector)\n\n vector_projected = self.project_vector(line.vector)\n\n return Line(point_projected, vector_projected)", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / 
dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def spline_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tD = d_coefficients(x1,x2,x3,C)\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,C,D)\n\tA = a_coefficients(y1,y2)\n\treturn(A,B,C[:2],D)", "def distance_point_line_3d(point: Vector, start: Vector, end: Vector) -> float:\n if start.isclose(end):\n raise ZeroDivisionError('Not a line.')\n v1 = point - start\n # point projected onto line start to end:\n v2 = (end - start).project(v1)\n # Pythagoras:\n return math.sqrt(v1.magnitude_square - v2.magnitude_square)", "def orthogonalDistance(W, point):\n assert len(W) == 8\n assert len(point) == 8\n import numpy as np\n import math\n\n ## parametrize the line (y = A + tN where A is a point on the line, t \n ## a scalar and N a unit vector in the direction of the line)\n normW = math.sqrt(np.dot(W, W))\n A = (0,0,0,0,0,0,0, W[7])\n N = [float(item)/normW for item in W]\n\n ## Calculate the distance using d = || (A-point) - ((A-point)*N)N||\n AMinPoint = [a_i - p_i for a_i, p_i in zip(A, point)]\n AMinPointDotN = np.dot(AMinPoint, N)\n AMinPointDotNTimesN = [AMinPointDotN * ni for ni in N]\n dVector = [LHS - RHS for LHS, RHS in zip(AMinPoint, AMinPointDotNTimesN)]\n d = math.sqrt(np.dot(dVector, dVector))\n \n return d", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def EuclidI11(self, line: Line, point: Point, interesting=True) -> Line:\n if point not in line:\n raise ValueError(f'Cannot erect a perpendicular. 
Point {point} is not on line {line}.')\n # rename point as c\n c = point\n # Pick an arbitrary point D on the line.\n d = line.point1 if line.point1 != point else line.point2\n # Make CE equal to CD.\n circle_center_c_radius_cd = self.add_circle(c, d, interesting=interesting)\n intersections = list(self.find_intersections_line_circle(line, circle_center_c_radius_cd))\n e = intersections[0] if intersections[0] != d else intersections[1]\n # Construct the equilateral triangle FDE on DE\n f = self.EuclidI1(Line(d, e), self.pick_point_not_on_line(line))\n # join CF\n return self.add_line(c, f, interesting=interesting)", "def line(self, P, Q, R):\n if not R or R == self.infpoint:\n raise ValueError(\"R must not be zero\")\n if P == Q == self.infpoint:\n return self.basefield.one\n if P == self.infpoint:\n return R[0] - Q[0]\n if Q == self.infpoint:\n return R[0] - P[0]\n if P[0] != Q[0]:\n return (Q[0] - P[0]) * R[1] - (Q[1] - P[1]) * \\\n R[0]- Q[0] * P[1] + P[0] * Q[1]\n if P[1] != Q[1]:\n return R[0] - P[0]\n return (3 * P[0] ** 2 + 2 * self.a2 * P[0] + self.a4 - self.a1 * P[1] ) * \\\n R[0] - (2 * P[1] + self.a1 * P[0] + self.a3 ) * R[1] - \\\n (P[0] ** 3) + self.a4 * P[0] + 2 * self.a6 - self.a3 * P[1]", "def orthoPolyPower(x,power):\n y = x**power\n x_normalized = x / np.dot(x,x) ** 0.5\n ortho = y - np.dot(x_normalized,y) * x_normalized\n orthonormal = ortho / np.dot(ortho,ortho)**0.5\n return orthonormal", "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def lorentzian2d(p, x, y):\n #2012-02-04 11:38 IJMC: Created\n \n x = array(x, dtype=float).copy()\n y = array(y, dtype=float).copy()\n p = array(p).copy()\n\n if len(p)==5:\n p = concatenate((p, [0, 0]))\n elif len(p)==6:\n p = concatenate((p, [0]))\n\n z = ((x - p[3]) / p[1])**2 + ((y - p[4]) / p[2])**2 + p[5] * (x - p[3]) * (y - p[4])\n \n return p[6] + p[0]/(1. 
+ z)", "def dist_to_line2d(line, point):\n\tx1,y1 = line[0]\n\tx2,y2 = line[1]\n\tx3,y3 = point\n\t\n\t# where on line the perpendicular is\n\tu = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))\n\t\t\t/ (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )\n\t\n\t# intersection point\n\tx = x1 + u*(x2-x1)\n\ty = y1 + u*(y2-y1)\n\t\n\tdist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))\n\t\n\treturn dist", "def calcul_point_plan_projection(cls,cx,cy,cz,spx,spy,axe_x,axe_y):\n projX=gs.Vector3(spx*axe_x.x,spx*axe_x.y,spx*axe_x.z)\n projY=gs.Vector3(spy*axe_y.x,spy*axe_y.y,spy*axe_y.z)\n point=gs.Vector3(projX+projY)+gs.Vector3(cx,cy,cz)\n return point", "def projection_P(P_prime):\n sorted_prime = -np.sort(-P_prime, axis=1) # Descending order sort\n cumsum_sorted = np.cumsum(sorted_prime, axis=1) # Compute cumulative sum of lines\n rho_availability = sorted_prime > (cumsum_sorted - 1) / np.arange(1, P_prime.shape[\n 1] + 1) # Compute non-zero rho candidates\n rho = np.count_nonzero(rho_availability, axis=1) # Compute number of non-zero values in final line (rho)\n theta = (cumsum_sorted[np.arange(len(rho)), rho - 1] - 1) / (rho) # Compute lagrange multiplier theta\n P = (P_prime.transpose() - theta).transpose().clip(min=0) # subtract multiplier, clip negatives\n\n return P", "def orthogonal(v):\n return np.array([-v[1], v[0]])", "def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]", "def compute_projection(M):\n P = torch.mm(M, torch.pinverse(M.T.matmul(M)).matmul(M.T))\n P = P.double()\n return P", "def _orthogonal_vector(vector):\n return -1 * vector[1], vector[0]", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def ortho_poly_predict(x, alpha, norm2, degree = 1):\n x = np.asarray(x).flatten()\n n = degree + 1\n Z = np.empty((len(x), n))\n Z[:,0] = 1\n if degree > 0:\n Z[:, 1] = x - alpha[0]\n if degree > 1:\n for i in np.arange(1,degree):\n Z[:, i+1] = (x - alpha[i]) * Z[:, i] - (norm2[i] / norm2[i-1]) * Z[:, i-1]\n Z /= np.sqrt(norm2)\n return Z", "def proj_linf(v, radius=1):\n vmod = np.abs(v)\n projmult = np.minimum(radius/vmod, 1)\n return projmult*v", "def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. 
Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def Perpendicular(self, line: Line, point: Point, interesting=True) -> Line:\n if point in line:\n return self.ErectPerpendicular(line, point, interesting=interesting)\n else:\n return self.DropPerpendicular(line, point, interesting=interesting)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def get_orthogonal_vec2d(vec):\n ortho = np.array([-vec[1], vec[0]])\n return ortho", "def proj_gnomonic_plane(lamb0, phi1, lamb, phi):\n\n cosc = np.sin(phi1)*np.sin(phi)\n cosc += np.cos(phi1)*np.cos(phi)*np.cos(lamb-lamb0)\n\n x = np.cos(phi)*np.sin(lamb-lamb0)\n x /= cosc\n\n y = np.cos(phi1)*np.sin(phi)\n y -= np.sin(phi1)*np.cos(phi)*np.cos(lamb-lamb0)\n\n y /= cosc\n\n return x, y", "def PerpendicularBisector(self, line: Line, interesting=True) -> Line:\n a, b = line.point1, line.point2\n # These are the steps of the constructions\n circ1 = self.add_circle(a, b, interesting=interesting)\n circ2 = self.add_circle(b, a, interesting=interesting)\n intersections = 
self.find_intersections(circ1, circ2) # Only include circle intersections.\n return self.add_line(*intersections, interesting=interesting)", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, coordright)\n\n return coordleft, coordright, dist", "def proj(v, gamma, D, C):\n D_hat = 1/(np.sqrt(D))\n l = gamma/(2*D) + v\n K = C + np.sum((0.5*gamma/np.sqrt(D))**2)\n w = lambda z: ((1/np.sqrt(D))*(z - 0.5*(gamma/np.sqrt(D)))) #go back to w\n #check the least squares solution:\n z_ls = (1/D_hat)*l\n if np.linalg.norm(z_ls,2)**2 <= K:\n proj = w(z_ls)\n return proj\n #Otherwise, look for Lambda\n obj = lambda Lambda: ((np.linalg.norm(((1/(D_hat**2 + Lambda*np.ones(len(D_hat))))*D_hat)*l,2))**2 - K)\n Lambda_opt = scipy.optimize.fsolve(obj, 0)[0]\n z_opt = ((1/(D_hat**2 + Lambda_opt*np.ones(len(D_hat))))*D_hat)*l\n proj = w(z_opt)\n if np.sum(np.isnan(proj)) > 0:\n raise ZeroDivisionError('Division by zero! Some matrices are not invertible!')\n return proj", "def proj_xy(self, t, next=None):\n if next is None:\n return self.normal(t).v.normalized(), 1\n v0 = self.normal(1).v.normalized()\n v1 = next.normal(0).v.normalized()\n direction = v0 + v1\n adj = (v0 * self.length) * (v1 * next.length)\n hyp = (self.length * next.length)\n c = min(1, max(-1, adj / hyp))\n size = 1 / cos(0.5 * acos(c))\n return direction.normalized(), min(3, size)", "def get_perpendicular(n: np.ndarray) -> np.ndarray:\n # find smallest component\n i = np.argmin(n)\n\n # get the other two indices\n a = (i + 1) % 3\n b = (i + 2) % 3\n\n result = np.zeros(3)\n result[i] = 0.0\n result[a] = n[b]\n result[b] = -n[a]\n return result", "def to_linear(self):\n return inv(quad_hybrid).dot(self.circular)", "def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled", "def line_point_shortest_dist(r: np.ndarray, v: np.ndarray, p: np.ndarray) -> Tuple[float, float]:\n\n t = np.dot(v, p - r) / np.dot(v, v)\n d = np.linalg.norm((r + v * t) - p)\n return d, t", "def reproject(point):\n wgs84 = pyproj.Proj('+init=epsg:4326')\n native = pyproj.Proj(DEM_PROJECTION)\n x, y = pyproj.transform(wgs84, native, point.x, point.y)\n return geom.Point(x, y)", "def GetParametricCoords(self):\n ...", "def function_3d(point):\n return point[0]**2 + point[1]**2 + point[2]**2 - 1", "def get_projections(points, vec_ucm):\n # Get the vector orthogonal to the UCM.\n vec_ortho = get_orthogonal_vec2d(vec_ucm)\n # Build a 
transformation matrix with vec_ucm and vec_ortho as new basis vectors.\n A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.\n # Centralize the data. Analogous to calculating across trials deviation from average for each time step.\n diffs = points - points.mean()\n # For computational efficiency we shortcut the projection calculation with matrix multiplication.\n # The actual math behind it:\n # coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)\n # Biased variance (normalized by (n-1)) of projection onto UCM vector:\n # var_ucm = vec_ucm.T@np.cov(diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.\n coeffs = diffs@A\n coeffs.columns = ['parallel', 'orthogonal']\n return coeffs", "def calculate_stereographic_projection(p):\n # P' = P * (2r / r + z)\n mu = 1 / (1 + p[2])\n x = p[0] * mu\n y = p[1] * mu\n return x, y", "def proj3d(v):\n v = normalize(v)\n x, y, z, w = v\n return np.array([x, y, z]) / (1 + 1e-8 - w) # avoid divide by zero", "def orthoXY(a):\n\n return [ a[1], -a[0], 0, 1.0 ]", "def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)", "def project_point(self, point: array_like) -> Point:\n # Vector from the point in space to the point on the plane.\n vector_to_plane = Vector.from_points(point, self.point)\n\n # Perpendicular vector from the point in space to the plane.\n vector_projected = self.normal.project_vector(vector_to_plane)\n\n return Point(point) + vector_projected", "def horizontal_projection(self, tangent_vec, base_point):\n caller_name = sys._getframe().f_back.f_code.co_name\n if not caller_name == \"vertical_projection\":\n try:\n return tangent_vec - self.vertical_projection(tangent_vec, base_point)\n except NotImplementedError:\n pass\n\n return self.horizontal_lift(\n self.tangent_riemannian_submersion(tangent_vec, base_point),\n fiber_point=base_point,\n )", "def parabolic_2D_function(\n x_y: Tuple,\n a_x2: float,\n a_x1: float,\n a_x0: float,\n a_y2: float,\n a_y1: float,\n a_y0: float,\n):\n\n x, y = x_y\n\n I_v = (a_x2 * x**2 + a_x1 * x + a_x0) / 2\n I_v += (a_y2 * y**2 + a_y1 * y + a_y0) / 2\n\n return I_v", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def cartesian(self) -> Tuple[np.number, np.number, np.number, np.number]:\n if self.dimension > 3:\n raise ValueError(\"The plane dimension must be <= 3.\")\n\n # The normal must be 3D to extract the coefficients.\n a, b, c = self.normal.set_dimension(3)\n\n d = -self.normal.dot(self.point)\n\n return a, b, c, d", "def perspective_lut(image_shape: tuple, principal_point: np.array, focal_length: float,\n model_coefficients: np.array) -> tuple:\n\n focal_length = np.abs(focal_length)\n\n # Create image coordinate mesh-grids. 
As the name implies, these are in the image coordinate system\n # with the origin at the top left corner\n u, v = np.meshgrid(\n np.arange(image_shape[1], dtype=np.float),\n np.arange(image_shape[0], dtype=np.float)\n )\n\n # Convert the coordinates into sensor coordinates (origin is at the principal point, and the\n # sensor is a focal length distance away from the lens optical centre)\n u -= principal_point[0]\n v -= principal_point[1]\n sensor_coords = np.vstack((u.flatten(), v.flatten(), np.ones(u.size) * focal_length))\n\n # Calculate the polynomial basis for the camera/lens model\n # rho is the Euclidean distance of the sensor position from the principal point\n rho = np.sqrt(np.square(sensor_coords[0, :]) + np.square(sensor_coords[1, :]))\n theta = np.arctan(np.divide(-sensor_coords[2,], rho))\n # calculate the polynomial basis, based on the angle\n basis = polynomial_basis(theta, model_coefficients.size)\n\n r = np.multiply(model_coefficients.reshape((model_coefficients.size, -1)), basis)\n r = np.sum(r, axis=0)\n r /= rho\n\n x_result = principal_point[0] + sensor_coords[0,] * r\n y_result = principal_point[1] + sensor_coords[1,] * r\n x_result = x_result.reshape((image_shape[0], image_shape[1]))\n y_result = y_result.reshape((image_shape[0], image_shape[1]))\n\n return x_result.astype(np.float32), y_result.astype(np.float32)", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. 
Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def proj(self, x: np.ndarray):\n return self.matvec(self.pinv(x))", "def distance_point_line_sqrd(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector_sqrd(cross_vectors(pa, pb))\n l_ab = length_vector_sqrd(ab)\n return l / l_ab", "def get_perpendicular2d(vector):\n if vector[1] == 0:\n return np.asarray([0.,1.])\n v2_0 = 1.0\n v2_1 = -(vector[0]/vector[1])\n v2 = np.asarray([v2_0, v2_1])\n return v2 / np.linalg.norm(v2)", "def point_and_plane_pose(plane_point, plane_orientation, points=None, xyz=None):\n vector = plane_orientation\n vector = vector / np.linalg.norm(vector)\n a = vector[0]\n b = vector[1]\n c = vector[2]\n\n d = -a * plane_point[0] - b * plane_point[1] - c * plane_point[2]\n\n if xyz is not None:\n xyz = np.asarray(xyz)\n if points.shape[0] != 3:\n logger.error(\n \"Wrong points shape. [3, N] expected, \" + str(points.shape) + \" given.\"\n )\n elif points is not None:\n points = np.asarray(points)\n if points.shape[1] != 3:\n logger.error(\n \"Wrong points shape. [N, 3] expected, \" + str(points.shape) + \" given.\"\n )\n xyz = points.T\n else:\n logger.error(\"points or xyz must be declared\")\n\n x, y, z = xyz\n z_out = (a * x + b * y + c * z + d) / (a ** 2 + b ** 2 + c ** 2) ** 0.5\n\n return z_out", "def perpendicular_bisector(point_1, point_2):\r\n A = 2 * (point_2.x - point_1.x)\r\n B = 2 * (point_2.y - point_1.y)\r\n C = (point_1.y - point_2.y) * (point_1.y + point_2.y) + \\\r\n (point_1.x - point_2.x) * (point_1.x + point_2.x)\r\n return np.matrix([[A],[B],[C]])", "def generate_line(point_1, point_2):\r\n A = point_1.y - point_2.y\r\n B = point_2.x - point_1.x\r\n C = point_1.y * B + point_1.x * A\r\n return np.matrix([[A],[B],[-C]])", "def claret_linear(mu, coeff):\n return 1.0 - coeff * (1.0 - mu)", "def gnomonic_project_toxy(RA1, Dec1, RAcen, Deccen):\n # also used in Global Telescope Network website\n cosc = np.sin(Deccen) * np.sin(Dec1) + np.cos(Deccen) * np.cos(Dec1) * np.cos(RA1-RAcen)\n x = np.cos(Dec1) * np.sin(RA1-RAcen) / cosc\n y = (np.cos(Deccen)*np.sin(Dec1) - np.sin(Deccen)*np.cos(Dec1)*np.cos(RA1-RAcen)) / cosc\n return x, y", "def diag_line((lat0, lon0, alt0), (lat, lon, alt), k=5):\n\tlats = np.linspace(lat0, lat, k)\n\tlons = np.linspace(lon0, lon, k)\n\talts = np.linspace(alt0, alt, k)\n\tp = zip(lats, lons, alts)\n\treturn p", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def ortho(self):\r\n\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n x = _vec3(m11, m21, m31)\r\n y = _vec3(m12, m22, m32)\r\n z = _vec3(m13, m23, m33)\r\n\r\n xl = x.length()\r\n xl*=xl\r\n y = y - ((x*y)/xl)*x\r\n z = z - 
((x*z)/xl)*x\r\n\r\n yl = y.length()\r\n yl*=yl\r\n z = z - ((y*z)/yl)*y\r\n\r\n return mat4( x.x, y.x, z.x, m14,\r\n x.y, y.y, z.y, m24,\r\n x.z, y.z, z.z, m34,\r\n m41, m42, m43, m44)", "def eAdd(P, Q): #adds 2 points by using the slope to find where the line intersects and returns the negation of that point\r\n R = point(0,0,P.c) #creates point object to store result\r\n if (P.x == 0 and P.y == 0) and (Q.x == 0 and Q.y == 0): #(0,0) is the identity\r\n return P #returns the identity\r\n elif P.x == 0 and P.y == 0:\r\n return Q\r\n elif Q.x == 0 and Q.y == 0:\r\n return P\r\n elif P == Q: #in case it is called when double should be\r\n R = eDouble(P)\r\n else: #this preforms the actual addition\r\n i = P.y-Q.y\r\n j = P.x-Q.x\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( ( (s**2) - P.x - Q.x) % P.c.p)\r\n R.y = ( (-P.y + s * (P.x - R.x) ) % P.c.p)\r\n return R", "def ortho_poly_fit(x, degree = 1,center = False):\n n = degree + 1\n x = np.asarray(x).flatten()\n if(degree >= len(np.unique(x))):\n stop(\"'degree' must be less than number of unique points\")\n xbar = np.mean(x)\n if center:\n x = x - xbar\n X = np.fliplr(np.vander(x, n))\n q,r = np.linalg.qr(X)\n\n z = np.diag(np.diag(r))\n raw = np.dot(q, z)\n\n norm2 = np.sum(raw**2, axis=0)\n alpha = (np.sum((raw**2)*np.reshape(x,(-1,1)), axis=0)/norm2 + xbar)[:degree]\n Z = raw / np.sqrt(norm2)\n return Z, norm2, alpha", "def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]", "def line_equation_ap(angle, (x1, y1)):\n \n # get second point on the line\n x2 = float(x1) + cos(angle)\n y2 = float(y1) + sin(angle)\n \n # return A, B and C coefficients\n return (y1 - y2, x2 - x1, x1*y2 - x2*y1)", "def perpendicular(self):\n return tuple.__new__(Vec2, (-self[1], self[0]))", "def straight_line(vel, init_pos, final_pos):\n\n acc = np.zeros (3)\n yaw = 1.0\n yawdot = 1.0\n\n p = (final_pos[2] - init_pos[2])/vel [2]\n #acc = (final_pos[2] - init_pos[2])/dt ** 2\n\n # constant velocity\n pos = init_pos + np.array([0, 0, p])\n FinalState = namedtuple('FinalState', 'pos vel acc yaw yawdot')\n return FinalState(pos, vel, acc, yaw, yawdot)", "def orthogonal_to(vector: ModelParameters) -> ModelParameters:\n new_vector = rand_u_like(vector)\n new_vector = new_vector - new_vector.dot(vector) * vector / math.pow(vector.model_norm(2), 2)\n return new_vector", "def convert_point(triangle, coefficients):\n return 
(triangle[0][0]*coefficients[0] + triangle[1][0]*coefficients[1] +\n triangle[2][0]*coefficients[2], triangle[0][1]*coefficients[0] +\n triangle[1][1]*coefficients[1] + triangle[2][1]*coefficients[2])", "def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )", "def shortest_line_to_point(point_a, point_b, point_c): # where a and b are on spin axis, c is the point spinning round\n axis_vect = np.subtract(point_a, point_b)\n axis_mag = magnitude(point_a, point_b)\n unit_axis = np.divide(axis_vect, axis_mag) # unit of pp\n # pp' constants - p\n\n # pp dot u\n t = np.sum(np.dot(unit_axis, unit_axis))\n c = np.sum(np.dot(np.subtract(point_b, point_c), unit_axis))\n p = -c / t\n project_point_on_axis_add = (np.multiply(unit_axis, p))\n project_point_on_axis = project_point_on_axis_add + point_b\n distance = magnitude(point_c, project_point_on_axis)\n return distance, project_point_on_axis", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def render_4d(polyhedron, **kwds):\n projection_direction = None\n try: \n projection_direction = kwds.pop('projection_directior')\n except KeyError:\n for ineq in polyhedron.inequality_generator():\n center = [v() for v in ineq.incident() if v.is_vertex()]\n center = sum(center) / len(center)\n if not center.is_zero(): \n projection_direction = center\n break\n projection_3d = Projection(polyhedron).schlegel(projection_direction)\n return render_3d(projection_3d, **kwds)", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def orthogonalize(*vlist, orthonormal=False):\n\n if not all(isinstance(vec, Vector) for vec in vlist):\n raise TypeError('Each element must be of Type Vector')\n\n ortho_vlist = []\n for i, term in enumerate(vlist):\n for j in range(i):\n term -= ortho_vlist[j].projection(vlist[i])\n # TODO : The following line introduces a performance issue\n # and needs to be changed once a good solution for issue #10279 is\n # found.\n if simplify(term).equals(Vector.zero):\n raise ValueError(\"Vector set not linearly independent\")\n ortho_vlist.append(term)\n\n if orthonormal:\n ortho_vlist = [vec.normalize() for vec in ortho_vlist]\n\n return ortho_vlist", "def orthogonal_component(self, basis: Vector) -> Vector:\n return self - self.parallel_component(basis)", "def EuclidI12(self, line: Line, point: Point, interesting=True) -> Line:\n if point in line:\n raise ValueError(f'Cannot drop a perpendicular. 
Point {point} is on line {line}.')\n # rename point as C\n c = point\n # Pick a point on the opposite side of line from c\n d = self.pick_point_not_on_line_on_side(line, c, same_side=False)\n circle_center_c_radius_cd = self.add_circle(c, d, interesting=interesting)\n a, b = self.find_intersections_line_circle(line, circle_center_c_radius_cd)\n side = d\n f = self.ErectEquilateralTriangle(Line(a, b), side=side, interesting=interesting)\n return self.add_line(c, f)", "def vertical_projection(self, tangent_vec, base_point, **kwargs):\n caller_name = sys._getframe().f_back.f_code.co_name\n if caller_name == \"horizontal_projection\":\n raise NotImplementedError\n\n return tangent_vec - self.horizontal_projection(tangent_vec, base_point)", "def crossLine(self, other):\n a, b = self.point\n c, d = other.point\n m, n = self.vector\n o, p = other.vector\n if n * o == m * p: # The lines are parallels\n return None\n elif self.angle == -math.pi / 2:\n return Point(a, d)\n elif other.angle == -math.pi / 2:\n return Point(b, c)\n else:\n x = (a * n * o - b * m * o - c * m * p + d * m * o) / (n * o - m * p)\n y = (x - a) * n / m + b\n return Point(x, y)", "def p2d(V,x,y):\n def s(a,N):\n \"\"\"Shortcut function to convert array x into a coluumn vector.\"\"\"\n a=np.reshape(a,(1,N**2),order='F').T\n return a\n N=V.shape[1]\n con=np.ones((x.shape[0],x.shape[1])) # constant terms\n xx,yy,xy=x*x,y*y,x*y\n xxx,yyy,xxy,xyy=xx*x,yy*y,xx*y,x*yy\n xxxx,yyyy,xxxy,xxyy,xyyy=xx*xx,yy*yy,xxx*y,xx*yy,x*yyy\n V2=s(V,N) \n lst=[yyyy,xxxy,xxyy,xyyy,xxx,yyy,xxy,xyy,xx,yy,xy,x,y,con]\n Q=s(xxxx,N)\n count = 0\n for elem in lst:\n elem=s(elem,N)\n count+=1\n Q=np.hstack((Q,elem))\n c=np.linalg.lstsq(Q,V2) \n c=c[0]\n theta=-0.5*np.arctan(c[11]/(c[10]-c[9]))\n Af=0.5*(c[9]*(1+1./np.cos(2*theta))+c[10]*(1-1./np.cos(2*theta)))\n Bf=0.5*(c[9]*(1-1./np.cos(2*theta))+c[10]*(1+1./np.cos(2*theta)))\n theta=180.*theta/np.pi\n return (Af, Bf, theta)", "def polyder_vec(p, m):\n factorial = np.math.factorial\n m = np.asarray(m, dtype=int) # order of derivative\n p = np.atleast_2d(p)\n order = p.shape[1] - 1\n\n D = np.arange(order, -1, -1)\n num = np.array([factorial(i) for i in D], dtype=object)\n den = np.array([factorial(max(i - m, 0)) for i in D], dtype=object)\n D = (num // den).astype(p.dtype)\n\n p = np.roll(D * p, m, axis=1)\n idx = np.arange(p.shape[1])\n p = np.where(idx < m, 0, p)\n\n return p", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def drawLine3D(x0,y0,z0,x1,y1,z1):\n dislin.strt3d(x0,y0,z0)\n dislin.conn3d(x1,y1,z1)", "def toBarycentric(self, p: Vec3) -> Vec3:\n abc = triangleArea(self.a.position, self.b.position, self.c.position)\n pbc = triangleArea(p, self.b.position, self.c.position)\n apc = triangleArea(self.a.position, p, self.c.position)\n\n if abc == 0.0:\n return Vec3(0, 0, 0)\n\n x = pbc / abc\n y = apc / abc\n return Vec3(x, y, 1.0 - x - y)" ]
[ "0.6800761", "0.64928424", "0.63452107", "0.6339828", "0.60856456", "0.6082376", "0.6082376", "0.59750146", "0.59033185", "0.5879611", "0.58655274", "0.5820088", "0.58171314", "0.5814186", "0.5794391", "0.5717215", "0.56910175", "0.56728375", "0.5667023", "0.5649503", "0.56087077", "0.56007695", "0.55867684", "0.5574368", "0.55700254", "0.5569092", "0.5560942", "0.55336773", "0.5491676", "0.54599196", "0.5457528", "0.5453317", "0.5448213", "0.5448178", "0.5442942", "0.54377675", "0.5434769", "0.5427082", "0.5420256", "0.5420121", "0.5416841", "0.5394599", "0.53913295", "0.53831154", "0.5382586", "0.53813213", "0.5380586", "0.5377936", "0.5376367", "0.5365603", "0.53650063", "0.53618586", "0.53606457", "0.53466076", "0.53441924", "0.5344089", "0.5327191", "0.53253317", "0.53233695", "0.53192073", "0.53017247", "0.5300519", "0.5296104", "0.52874637", "0.5284678", "0.528335", "0.5274168", "0.52682185", "0.52550787", "0.52520436", "0.5250624", "0.52416956", "0.5238124", "0.52376163", "0.5235404", "0.523007", "0.52258873", "0.52202183", "0.5205746", "0.5204786", "0.52041143", "0.5201275", "0.52010775", "0.52010065", "0.52001476", "0.51780045", "0.51745045", "0.5170646", "0.51677316", "0.51614535", "0.5156292", "0.5141556", "0.51213455", "0.51154256", "0.5112255", "0.51037395", "0.50978345", "0.50936335", "0.5092573", "0.5072776" ]
0.7622919
0
Run a sha1 of the file and return the result
def calchash(filename):
    sha = hashlib.sha1()
    with open(filename, 'rb') as f:
        sha.update(f.read())
    return sha
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetFileSha1(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha1=True)['sha1'])", "def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()", "def sha1(fname):\n fh = open(fname, 'rb')\n sha1 = hashlib.sha1()\n block = fh.read(2 ** 16)\n while len(block) > 0:\n sha1.update(block)\n block = fh.read(2 ** 16)\n\n return sha1.hexdigest()", "def sha1sum(filename):\n if not os.path.isfile(filename):\n return ''\n hasher = hashlib.sha1()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def sha1sum(filename):\n with open(filename, mode='rb') as f:\n d = hashlib.sha1()\n for buf in iter(functools.partial(f.read, 1024*100), b''):\n d.update(buf)\n return d.hexdigest()", "def sha1HashFile(self, filename: Path):\n bufferSize = 65536\n sha1Hash = hashlib.sha1()\n\n with filename.open('rb') as f:\n while True:\n data = f.read(bufferSize)\n\n if not data:\n break\n\n sha1Hash.update(data)\n\n return str(sha1Hash.hexdigest())", "def sha_hash(file_name: str):\n BLOCKSIZE = 65536\n line = '' # format one line for hash\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE) # read each line of doc\n while len(buf) > 0:\n line += buf.decode('utf-8')\n buf = afile.read(BLOCKSIZE)\n\n hex = \"0x\" + sha1(line.encode()) # create sha1 hash\n return int(hex, 0)", "def _get_sha1(file_descriptor):\n sha1 = hashlib.sha1()\n for block in iter(partial(file_descriptor.read, BLOCK_SIZE), ''):\n sha1.update(block)\n file_descriptor.seek(0)\n return sha1.hexdigest()", "def hash_file_sha1(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha1, binary=binary, buffer_size=buffer_size)", "def hashfile(filename):\n BLOCKSIZE = 65536\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n sha1.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(sha1.hexdigest())", "def hash_file ( filename ):\n sha1 = hashlib.sha1()\n with open( filename, 'rb' ) as f:\n while True:\n buf = f.read(65536) # read by 64kb buffers size\n if not buf:\n break\n sha1.update(buf)\n return sha1", "def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()", "def file_sha1(file_name, ignore_format=False, max_call_times=None):\r\n _FILE_SLIM = 65536 # read stuff in 64kb chunks!\r\n call_times = 0\r\n my_sha1 = hashlib.sha1()\r\n with open(file_name, \"rb\") as ob:\r\n while True:\r\n data = ob.read(_FILE_SLIM)\r\n if not data:\r\n break\r\n if ignore_format:\r\n data = data.decode(encoding=\"utf-8\")\r\n data = data.replace(\"\\r\", '')\r\n data = data.replace(\"\\n\", '')\r\n data = data.encode(encoding=\"utf-8\")\r\n if max_call_times:\r\n call_times += 1\r\n if call_times > max_call_times:\r\n break\r\n my_sha1.update(data)\r\n return my_sha1.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n 
# read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def computeHash(infile):\n f = open(infile, 'rb')\n buffer = f.read()\n f.close()\n return hashlib.sha1(buffer).hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def get_sha1(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha1_from_stream(i)", "def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()", "def cksum(filename):\n hash, err = Popen([\"cksum\", filename], stdout=PIPE, stderr=PIPE).communicate()\n if err != '':\n raise Exception(\"Error hashing {filename}\".format(**locals()))\n return hash.split(\" \")[0]", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def hash_file(file_name):\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(hasher.hexdigest())", "def get_content_sha1(self):", "def get_hash(file_buffer):\n data = file_buffer.read()\n hasher = sha1()\n hasher.update(data)\n return hasher.hexdigest()", "def get_file_sha(full_path):\n in_file = open(full_path, 'rb')\n try:\n # Bug: why doesn't this use sha_func?\n sha_value = sha1()\n while True:\n bytes = in_file.read(READ_CHUNK_LEN)\n if bytes == \"\":\n break\n sha_value.update(bytes)\n return sha_value.digest()\n finally:\n in_file.close()", "def apkdownloadmirror_get_sha1_sum(soup, **_):\n return soup.find(text=re.compile(r'File APK Sha1:')).next.text.strip()", "def sha1(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)\n d.update(data)\n return d.digest()", "def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()", "def check_sha1(filename, sha1_hash):\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n\n sha1_file = sha1.hexdigest()\n l = min(len(sha1_file), len(sha1_hash))\n return sha1.hexdigest()[0:l] == sha1_hash[0:l]", "def sha1(self) -> str:\n return self.data.sha1", "def hashFile(path: str) -> str:\n\tif not 
os.path.exists(path):\n\t\traise FileNotFoundError\n\n\thasher = hashlib.sha1()\n\tblock_sz = 8192\n\twith open(path, 'rb') as f:\n\t\tbuf = f.read(block_sz)\n\t\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = f.read(block_sz)\n\treturn str(hasher.hexdigest())", "def get_sha1_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m: hashlib._hashlib.HASH = hashlib.sha1()\n return calc_hash(src, m)", "def sha1sum(filename, blocksize=65536):\n hash = hashlib.sha1()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(blocksize), b\"\"):\n hash.update(block)\n return hash.hexdigest()", "def run(fpath, sig_key=None):\n # Check that hashlib is available\n try:\n import hashlib\n except ModuleNotFoundError:\n sys.exit('Error: hashlib not found.')\n\n if not os.path.isfile(fpath):\n sys.exit('File Error: Supplied filepath does not refer a valid file.')\n \n # Initialize hasher and iterate through blocks of file\n hasher = hashlib.sha256()\n with open(fpath, 'rb') as file:\n for chunk in iter(partial(file.read, BLOCK_SIZE), b''):\n hasher.update(chunk)\n if sig_key is not None:\n hasher.update(sig_key)\n\n res = (hasher.hexdigest(), os.path.basename(fpath))\n # print('{} {}'.format(res[0], res[1]))\n\n return hasher.hexdigest()", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def hash_file(filepath):\n digest = hashlib.sha1()\n with open(filepath, 'rb') as f:\n while True:\n chunk = f.read(1024*1024)\n if not chunk:\n break\n digest.update(chunk)\n return digest.hexdigest()", "def hash_file_native(file_path, tool=\"sha256sum\"):\n output = subprocess.check_output([tool, file_path], shell=False)\n return output.decode(\"utf-8\").partition(\" \")[0].strip()", "def checksumFile(filename):\n return md5File(filename)", "def hash_file(self, file_path, file_arcname):\n\n file_path = os.path.abspath(file_path)\n\n # If the file_arcname argument is None use the base file name as the\n # arc name\n if file_arcname is None:\n file_arcname = os.path.basename(file_path)\n\n if not os.path.exists(file_path):\n task_error(\"%s doesn't exist\" % file_path)\n if not os.access(file_path, os.R_OK):\n task_error(\"Can't read from %s\" % file_path)\n\n file_mode = os.stat(file_path)[stat.ST_MODE]\n if not stat.S_ISDIR(file_mode) and not stat.S_ISREG(file_mode):\n task_error(\"Unknown file type for %s\" % file_path)\n\n file_in = None\n try:\n # open to read binary. 
This is important.\n file_in = open(file_path, 'rb')\n except IOError:\n task_error(\"Couldn't read from file: %s\" % file_path)\n\n # hash file 1Mb at a time\n hashval = hashlib.sha1()\n while True:\n data = file_in.read(1024 * 1024)\n if not data:\n break\n hashval.update(data)\n\n # update file bundle status\n\n self.running_size += len(data)\n\n self.percent_complete = 100.0 * self.running_size / self.bundle_size\n\n # only update significant progress\n if self.percent_complete - self.last_percent > 1:\n self.report_percent_complete()\n self.last_percent = self.percent_complete\n\n file_hash = hashval.hexdigest()\n\n # print 'hash: ' + file_hash\n file_in.close()\n\n modified_name = os.path.join('data', file_arcname)\n (file_dir, file_name) = os.path.split(modified_name)\n\n # linuxfy the directory\n file_dir = file_dir.replace('\\\\', '/')\n\n info = {}\n info['size'] = os.path.getsize(file_path)\n mime_type = mimetypes.guess_type(file_path, strict=True)[0]\n\n info['mimetype'] = mime_type if mime_type is not None else 'application/octet-stream'\n info['name'] = file_name\n info['mtime'] = DT.datetime.utcfromtimestamp(int(os.path.getmtime(file_path))).isoformat()\n info['ctime'] = DT.datetime.utcfromtimestamp(int(os.path.getctime(file_path))).isoformat()\n info['destinationTable'] = 'Files'\n info['subdir'] = file_dir\n info['hashsum'] = file_hash\n info['hashtype'] = 'sha1'\n\n # todo make sure errors bubble up without crashing\n if file_arcname in self.file_meta:\n print file_arcname\n task_error(\n \"Different file with the same arcname is already in the bundle\")\n return\n\n return info", "def sha1Function():\r\n\r\n sha1Input = input(\"Enter SHA-1 String: \") # user input for hashing\r\n \r\n sha1Result = hashlib.sha1(sha1Input.encode()) # encoding user input then sending to sha1() function\r\n \r\n print(\"Hashing Successful\")\r\n print(\"The SHA-1 Hashing Result is : \", end =\"\") \r\n print(sha1Result.hexdigest()) # printing the hashing result in hexadecimal value\r\n\r\n menu() # display the menu again\r", "def _get_sha_metadata(filename):\n with open(filename) as f:\n return hashlib.sha1(f.read()).hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(file_to_hash):\n print(\"Hashing \" + file_to_hash + \"...\")\n hash_algorithm = hashlib.sha256()\n file = open(file_to_hash, 'rb')\n while True:\n contents = file.read(65536)\n if not contents:\n break\n hash_algorithm.update(contents)\n hash_str = hash_algorithm.hexdigest()\n return hash_str", "def compute_checksum(filename):\n cmd = 'md5sum ' + filename\n return pipe(cmd)", "def checksum(file):\n\n cksm = hashlib.sha256()\n f = open(file, 'rb')\n try:\n cksm.update(f.read())\n finally:\n f.close()\n return cksm.hexdigest()", "def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)", "def _sha1(self):\n return hashlib.sha1(self._blob).hexdigest()", "def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)", "def checksum_from_sha1(value):\n # More constrained regex at lexer level\n CHECKSUM_RE = 
re.compile('SHA1:\\\\s*([\\\\S]+)', re.UNICODE)\n match = CHECKSUM_RE.match(value)\n if match:\n return checksum.Algorithm(identifier='SHA1', value=match.group(1))\n else:\n return None", "def hash_file(method, path):\n f = open(path, \"rb\")\n h = method()\n while True:\n buf = f.read(BUFSIZE)\n if not buf:\n break\n h.update(buf)\n return h.hexdigest()", "def sigfile(fpath):\n sigsha = hashlib.sha1()\n fbj = open(fpath, 'rb')\n try:\n sigsha.update(fbj.read()) # pylint: disable-msg=E1101\n finally:\n fbj.close()\n return sigsha.hexdigest()", "def test_same_sha(self):\n self.create_archive(fields={}, files={\"foo\": \"bar\"})\n file_ = File.objects.create()\n file_.putfile(BytesIO(b\"bar\"))\n self.create_release_file(file=file_)\n\n index = read_artifact_index(self.release, None)\n assert file_.checksum == index[\"files\"][\"fake://foo\"][\"sha1\"]", "def sha1(self):\n return self.tag(\"sha1\")", "def hashfile(file):\n\n hasher = hashlib.sha256()\n\n with open(file, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n hasher.update(buf)\n\n return(hasher.hexdigest())", "def hash_file(path, digest=None):\r\n digest = digest or hashlib.sha1()\r\n with open(path, 'rb') as fd:\r\n s = fd.read(8192)\r\n while s:\r\n digest.update(s)\r\n s = fd.read(8192)\r\n return digest.hexdigest()", "def get_hash(content):\n return hashlib.sha1(content).hexdigest()", "def sha256sum(filename):\n if not os.path.isfile(filename):\n return ''\n hasher = hashlib.sha256()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def sha256sum(filename):\n content = open(filename, 'rb').read()\n sha256_obj = hashlib.sha256(content)\n return sha256_obj.hexdigest()", "def FNV1Hash(filename):\n \n FNV1_32_INIT = 0x811c9dc5\n FNV1_PRIME_32 = 16777619\n\n lowerName = filename.lower()\n \n _hash = FNV1_32_INIT\n uint32_max = 2 ** 32\n \n for c in lowerName:\n _hash = (_hash * FNV1_PRIME_32) % uint32_max\n _hash = _hash ^ ord(c)\n return format(_hash, 'x')", "def checksum(self, filepath) -> str:\n if os.path.exists(filepath):\n hash_md5 = md5()\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return urlsafe_b64encode(hash_md5.digest()).decode('utf-8')\n\n return \"\"", "def getHashFile(file):\n try:\n fileContent = open(file, 'rb').read()\n except:\n raise IOError, \"No such file...\"\n return False\n return getHash(fileContent)", "def calculate_hash(filename, raise_on_not_found = False):\n if not is_file(filename) and not raise_on_not_found:\n return \"NOTFOUND\"\n\n with open(filename, \"rb\") as file:\n sha256 = hashlib.sha256()\n buf = file.read(128)\n while len(buf) > 0:\n sha256.update(buf)\n buf = file.read(128)\n return str(binascii.hexlify(sha256.digest()), \"utf8\")", "def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()", "def computeHash(filename):\n fileHash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n fileHash.update(chunk)\n return fileHash.hexdigest()", "def checksum_of(filepath):\n bfsz = 10240000 # 10 MB buffer\n sum = hashlib.sha256()\n with open(filepath) as fd:\n while True:\n buf = fd.read(bfsz)\n if not buf: break\n sum.update(buf)\n return sum.hexdigest()", "def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = 
md5(header).hexdigest()\n return suma_md5", "def hashfile(self, afile):\n # encode_buffer = False\n\n buf = afile.read(self.blocksize)\n while buf:\n # Need to use string-escape for Python 2 non-unicode strings. For\n # Python 2 unicode strings and all Python 3 strings, we need to use\n # unicode-escape. The effect of them is the same.\n if isinstance(buf, str):\n buf = buf.encode('unicode-escape')\n\n self.hasher.update(buf)\n buf = afile.read(self.blocksize)\n return self.hasher.hexdigest()", "def readfile(self, filename):\n \n f = file(filename,'rb');\n #print \"\\nReading %s \\n\" % f.name;\n m = md5.new();\n readBytes = 1024; # read 1024 bytes per time\n totalBytes = 0;\n while (readBytes):\n readString = f.read(readBytes);\n m.update(readString);\n readBytes = len(readString);\n totalBytes+=readBytes;\n f.close();\n \n return m.hexdigest()", "def check_hash(self, fname, args):\n fobj = self._open_file(fname)\n\n rc = 0\n format_errors = 0\n hash_errors = 0\n read_errors = 0\n for idx, line in enumerate(fobj):\n # remove any newline characters\n m = self.CHECK_RE.match(line.strip())\n if not m:\n if args.warn:\n self.app.stderr.write(\n 'hasher {0}: {1}: {2}: improperly formatted {3}'\n ' checksum line\\n'.format(self.name, fname, idx + 1,\n self.name.upper()))\n format_errors += 1\n rc = 1\n continue\n hash_value, binary, check_file = m.groups()\n\n try:\n check_f = open(check_file, 'rb' if binary == '*' else 'r')\n except IOError:\n self.app.stderr.write(\n 'hasher {0}: {1}: No such file or directory\\n'.format(\n self.name, check_file))\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, READ_ERROR))\n read_errors += 1\n rc = 1\n continue\n\n if self._calculate_hash(check_f) == hash_value:\n if not (args.quiet or args.status):\n self.app.stdout.write(\n STATUS_MSG.format(check_file, SUCCESS))\n else:\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, HASH_ERROR))\n hash_errors += 1\n rc = 1\n\n if format_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} line{2} {3} improperly'\n ' formatted\\n'.format(\n self.name,\n format_errors,\n 's' if format_errors > 1 else '',\n 'are' if format_errors > 1 else 'is',\n ))\n if read_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} listed file{2}'\n ' could not be read\\n'.format(\n self.name,\n read_errors,\n 's' if read_errors > 1 else '',\n ))\n if hash_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} computed checksum{2}'\n ' did NOT match\\n'.format(\n self.name,\n hash_errors,\n 's' if hash_errors > 1 else '',\n ))\n return rc", "def sha256sum(filename):\n with open(filename, 'rb') as f:\n m = hashlib.sha256()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()", "def get_checksum(filename):\n # You could use popen here. I read about it, and subprocess is meant\n # to replace os.popen, so I used it instead.\n\n # First, run the command md5 sum with filename as input.\n # It's stored as a subprocess.CompletedProcess\n process = subprocess.run(['md5sum',filename], capture_output=True)\n \n # Use the method stdout from subprocess.CompletedProcess (seen in\n # the Python docs) to get the output. As seen in the book, md5sum will\n # output the checksum follwed by the filename. 
split() will put\n # those two elements into a list, and [0] will take the first element,\n # which will be the checksum.\n checksum = process.stdout.split()[0]\n return checksum", "def git_sha1_commit():\n return local('git rev-parse --short HEAD', capture=True)", "def get_checksum(input_fname):\n with open(input_fname, \"rb\") as infile:\n file_contents = infile.read()\n\n checksum = hashlib.md5(file_contents).hexdigest()\n return checksum", "def file_digest(file):\n # 'rb' file mode reads the file as bytes\n input_file = open(file, 'rb')\n data = input_file.read()\n # getting the digest\n digest = hash_comparing(data).hexdigest()\n input_file.close()\n return digest", "def _sha1_hash_json(self, value):\n hash = hashlib.new(\"sha1\")\n binary_value = value.encode(\"ascii\")\n hash.update(binary_value)\n sha1_res = hash.hexdigest()\n return sha1_res", "def semhash(file):\n _hash_helper(file)", "def semhash(file):\n _hash_helper(file)", "def calculate_md5sum_of_a_file(context, file_name, file_path):\n command = \"md5sum \" + file_path + \"/\" + file_name + \" | awk {'print $1'}\"\n return context.cme_session.send_ssh_command(command=command)", "def hash_from_file(file_path):\r\n return hash_from_code(open(file_path, 'rb').read())", "def ukey(self, path):\n out = self._call(\"GETFILECHECKSUM\", path=path, redirect=False)\n if \"Location\" in out.headers:\n location = self._apply_proxy(out.headers[\"Location\"])\n out2 = self.session.get(location)\n out2.raise_for_status()\n return out2.json()[\"FileChecksum\"]\n else:\n out.raise_for_status()\n return out.json()[\"FileChecksum\"]", "def hash_for_file(file_name, block_size=2 ** 20):\n hasher = SHA256.new()\n source_file = open(file_name, \"r\")\n\n while True:\n data = source_file.read(block_size)\n if not data:\n break\n hasher.update(data.encode('utf-8'))\n\n source_file.close()\n return hasher.hexdigest()", "def gdsii_hash(filename, engine=None):\n with open(filename, 'rb') as fin:\n data = fin.read()\n contents = []\n start = pos = 0\n while pos < len(data):\n size, rec = struct.unpack('>HH', data[pos:pos + 4])\n if rec == 0x0502:\n start = pos + 28\n elif rec == 0x0700:\n contents.append(data[start:pos])\n pos += size\n h = hashlib.sha1() if engine is None else engine\n for x in sorted(contents):\n h.update(x)\n return h.hexdigest()", "def sha512_file(file_name):\n\n hash_func = hashlib.sha256()\n\n with open(file_name, \"rb\") as fd:\n hash_func.update(fd.read())\n\n return hash_func.hexdigest()", "def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()", "def fsum(fpath):\n import hashlib\n import codecs\n with codecs.open(fpath, \"r\", \"utf-8\") as filep:\n buff = filep.read()\n cksum = hashlib.md5(buff.encode(\"utf-8\"))\n return cksum.hexdigest()", "def _git_intern_file(self, file_contents, cwd, commit_hash):\n cmd = 'hash-object -t blob -w --stdin'.split(' ')\n stdin = self.api.m.raw_io.input(file_contents)\n stdout = self.api.m.raw_io.output()\n step_name = 'Hashing modified DEPS file with revision ' + commit_hash\n step_result = self.api.m.git(*cmd, cwd=cwd, stdin=stdin, stdout=stdout,\n name=step_name)\n hash_string = step_result.stdout.splitlines()[0]\n try:\n if 
hash_string:\n int(hash_string, 16)\n return hash_string\n except ValueError: # pragma: no cover\n reason = 'Git did not output a valid hash for the interned file.'\n self.api.m.halt(reason)\n raise self.api.m.step.StepFailure(reason)", "def file_checksum(filename):\n hash_md5 = hashlib.md5()\n with tf.gfile.Open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n f.close()\n return hash_md5.hexdigest()", "def get_file_sha256(fname):\n with open(fname, 'rb') as afile:\n return base64.b64encode(get_file_hash(afile, hashlib.sha256()))", "def generate_hash(self, fname, args):\n fobj = self._open_file(fname, args.binary)\n hash_value = self._calculate_hash(fobj)\n\n line = '{0} {1}{2}\\n'.format(hash_value, '*' if args.binary else ' ',\n fname)\n\n if '//' in line:\n line = '//' + line.replace('//', '////')\n self.app.stdout.write(line)", "def sha1(s: str) -> str:\n return hashlib.sha1(s.encode()).hexdigest()", "def gdsii_hash(filename, engine=None):\n if hasattr(filename, \"__fspath__\"):\n filename = filename.__fspath__()\n with open(filename, \"rb\") as fin:\n data = fin.read()\n contents = []\n start = pos = 0\n while pos < len(data):\n size, rec = struct.unpack(\">HH\", data[pos : pos + 4])\n if rec == 0x0502:\n start = pos + 28\n elif rec == 0x0700:\n contents.append(data[start:pos])\n elif size == 0:\n warnings.warn(\n \"[GDSPY] Zero-length record found in {0} at position {1}. \"\n \"Skipping the remaining of the file contents.\".format(filename, pos),\n stacklevel=2,\n )\n break\n pos += size\n h = hashlib.sha1() if engine is None else engine\n for x in sorted(contents):\n h.update(x)\n return h.hexdigest()", "def get_sha256_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def hash_file(file_like_object):\n checksum = hashlib.sha1()\n for chunk in iter(lambda: file_like_object.read(32768), b''):\n encoded_chunk = (chunk.encode(encoding='utf-8')\n if isinstance(chunk, six.string_types) else chunk)\n checksum.update(encoded_chunk)\n return checksum.hexdigest()", "def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)", "def get_checksum(self, u_file: 'UserFile') -> str:\n ...", "def test_create_SHA_256_hash_of_file_matches_cosmic_build_tool(\n file_name, expected_hash\n):\n file_path = str(Path(__file__).parent.parent / \"steps/component1\" / file_name)\n hash = utils.create_SHA_256_hash_of_file(file_path)\n\n assert hash == expected_hash", "def _sha1_hash_file(self, config_type):\n config = self.CLOUDWATCH_CONFIG_TYPE_TO_CONFIG_VARIABLE_REPLACE_FUNC. 
\\\n get(config_type)()\n value = json.dumps(config)\n sha1_res = self._sha1_hash_json(value)\n return sha1_res", "def get_checksum(file_path: str) -> str:\n\n # Open the file in binary mode\n with open(file_path, \"rb\") as file:\n # Create a SHA-256 hash object\n hash_object = hashlib.sha256()\n\n # Iterate over the file in chunks\n for chunk in iter(lambda: file.read(4096), b\"\"):\n # Feed the chunk to the hash object\n hash_object.update(chunk)\n\n # Obtain the checksum in hexadecimal format\n checksum = hash_object.hexdigest()\n\n return checksum", "def get_file_hash(fname, hash_length):\n hash_sha = hashlib.sha256()\n with open(fname, 'rb') as infile:\n for chunk in iter(lambda: infile.read(4096), b''):\n hash_sha.update(chunk)\n hash_sha = hash_sha.hexdigest()\n hash_sha = int(hash_sha, 16) % (2 ** (4 * hash_length))\n return hex_encode(hash_sha, hash_length)" ]
[ "0.7758882", "0.7735032", "0.76928216", "0.75606763", "0.7542691", "0.73954326", "0.7363118", "0.71328443", "0.7127811", "0.71266234", "0.7059906", "0.705759", "0.7057296", "0.70450103", "0.70450103", "0.6978939", "0.6903451", "0.69028926", "0.6887849", "0.6887431", "0.6827436", "0.68080527", "0.67813694", "0.6766135", "0.67653716", "0.67512363", "0.67276216", "0.67060536", "0.667192", "0.66641206", "0.6657363", "0.6638315", "0.6636783", "0.66314554", "0.6584346", "0.65647113", "0.65467215", "0.65445054", "0.6503998", "0.64961153", "0.64076126", "0.63745487", "0.6292124", "0.6275252", "0.62549543", "0.624388", "0.6240218", "0.62380624", "0.62374663", "0.62302613", "0.6220183", "0.6207773", "0.61413914", "0.6127855", "0.61183316", "0.6114331", "0.60881555", "0.6077916", "0.6068627", "0.60618937", "0.60452026", "0.6027545", "0.6024335", "0.60009915", "0.6000755", "0.599807", "0.5965822", "0.5960346", "0.5948931", "0.5945783", "0.59432286", "0.5942943", "0.5923207", "0.5918023", "0.5915761", "0.5910558", "0.59042025", "0.59042025", "0.59032536", "0.59026146", "0.58772755", "0.5871903", "0.58660096", "0.5863246", "0.58475506", "0.5842023", "0.5826899", "0.5820272", "0.5820259", "0.5815941", "0.58155", "0.58110493", "0.5807045", "0.58049816", "0.57984704", "0.57964885", "0.57735205", "0.575968", "0.57580227", "0.57487476" ]
0.7463678
5
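A minimal usage sketch, assuming the calchash helper shown in the document field above: it compares two files by their SHA-1 digests. The file names and the module-level import are illustrative assumptions, not part of the dataset row.

import hashlib

def calchash(filename):
    # Mirrors the helper above: hash the whole file and return the sha1 object.
    sha = hashlib.sha1()
    with open(filename, 'rb') as f:
        sha.update(f.read())
    return sha

# Hypothetical file names; compare the two digests byte-for-byte.
if calchash('a.bin').digest() == calchash('b.bin').digest():
    print('files match')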
Checks to see if the destination exists. If a source file is passed in, run a checksum
def checkfile(filename, source=None):
    if source:
        # Let's check some sums
        if os.path.exists(filename) and os.path.exists(source):
            src_sha = calchash(source)
            dest_sha = calchash(filename)
            if DRYRUN:
                print("{src} hash {src_sha}. {dest} hash {dest_sha}".format(src=source, dest=filename, src_sha=src_sha.hexdigest(), dest_sha=dest_sha.hexdigest()))
            return src_sha.digest() == dest_sha.digest()
    else:
        return os.path.exists(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checksum_compare(source_file, dest_file):\n\n con_ssh = ControllerClient.get_active_controller()\n\n LOG.info(\"Compare checksums on source file and destination file\")\n cmd = \"getfattr -m . -d {}\"\n\n exitcode, source_sha = con_ssh.exec_cmd(cmd.format(source_file))\n LOG.info(\"Raw source file checksum is: {}\".format(source_sha))\n source_sha2 = source_sha.split(\"\\n\")\n print(\"This is source_sha2: {}\".format(source_sha2))\n assert source_sha2 != [''], \"No signature on source file\"\n\n if source_file.startswith(\"/\"):\n source_sha = source_sha2[2] + \" \" + source_sha2[3]\n else:\n source_sha = source_sha2[1] + \" \" + source_sha2[2]\n\n LOG.info(\"Extracted source file checksum: {}\".format(source_sha))\n\n exitcode, dest_sha = con_ssh.exec_cmd(cmd.format(dest_file))\n LOG.info(\"Raw symlink checksum is: {}\".format(dest_sha))\n dest_sha2 = dest_sha.split(\"\\n\")\n\n if dest_file.startswith(\"/\"):\n dest_sha = dest_sha2[2] + \" \" + dest_sha2[3]\n else:\n dest_sha = dest_sha2[1] + \" \" + dest_sha2[2]\n\n LOG.info(\"Extracted destination file checksum: {}\".format(dest_sha))\n\n if source_sha == dest_sha:\n return True\n else:\n return False", "def verifyFile(source, destination):\n\tsourceHash = hashlib.sha256(open(source, 'rb').read()).digest()\n\tdestinationHash = hashlib.sha256(open(destination, 'rb').read()).digest()\n\n\tif sourceHash == destinationHash:\n\t\treturn (True, str(sourceHash))\n\n\treturn False", "def file_copy_remote_exists(self, src, dest=None, file_system=None):\n self.enable()\n if file_system is None:\n file_system = self._get_file_system()\n\n file_copy = self._file_copy_instance(src, dest, file_system=file_system)\n if file_copy.check_file_exists() and file_copy.compare_md5():\n log.debug(\"Host %s: File %s already exists on remote.\", self.host, src)\n return True\n\n log.debug(\"Host %s: File %s does not already exist on remote.\", self.host, src)\n return False", "def _install_file(srcdir, filename, dstdir):\n srcfilename = os.path.join(srcdir, filename)\n dstfilename = os.path.join(dstdir, filename)\n if not os.path.exists(srcfilename):\n if os.path.exists(dstfilename):\n subprocess.run(['rm', dstfilename], check=True)\n return (False, True)\n return (False, False)\n\n equal = subprocess.run(['diff', '-q', srcfilename, dstfilename],\n check=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n if not equal:\n subprocess.run(['mv', srcfilename, dstfilename], check=True)\n return (True, not equal)", "def __copyfile(source, destination):\n logger.info(\"copyfile: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile: %s -> %s failed! 
Error: %s\", source, destination, e\n )\n return False", "def match_stat(dest_path, source_path):\n return shutil.copystat(source_path, dest_path)", "def moveFile(src, dest, bak=\"bak\"):\n\t\n\tmessage = \"processing: {0} -> {1}\".format(src, dest)\n\tlogger.info(message)\n\n\t#compare the source and destination, if the files are the same do nothing\n\tif os.path.exists(src) and os.path.exists(dest): \n\t\tmessage = \"file {0} found, comparing to {1}\".format(src, dest)\n\t\tlogger.info(message)\n\t\t(fileCheck, fileSig) = verifyFile(src, dest)\n\t\tif fileCheck:\n\t\t\tmessage = \"source file {0} matches destination file {1}\".format(src, dest)\n\t\t\tlogger.info(message)\n\t\t\treturn True\n\t\t\n\t#checks to see if the destination file exists, then creates a backup of it\n\tif os.path.exists(dest):\n\t\tbackupFileName = \"{0}.{1}\".format(dest, bak)\n\t\tmessage = \"file {0} exists, creating backup: {1}\".format(dest, backupFileName)\n\t\tlogger.info(message)\n\t\ttry:\n\t\t\tshutil.move(dest, backupFileName)\n\t\texcept IOError as errorMessage:\n\t\t\tlogger.error(errorMessage)\n\t\t\treturn False\n\t\t\n\t#attempts to copy the source file to the destination, \n\tif os.path.exists(src):\n\t\tmessage = \"copying {0} to {1})\".format(src, dest)\n\t\ttry:\n\t\t\tshutil.copy(src, dest)\n\t\texcept IOError as errorMessage:\n\t\t\tlogger.error(errorMessage)\n\t\t\tshutil.move(backupFilenName, dest)\n\t\t\treturn False\n\t\t\n\t#verify that files are the same\n\t(fileCheck, fileSig) = verifyFile(src, dest)\n\tif fileCheck:\n\t\tmessage = \"File transfer verified {0} -> {1}\".format(src, dest)\n\t\tlogger.info(message)\n\t\tmessage = \"File Signature for {0}: {1}\".format(src, fileSig)\n\t\tlogger.info(message)\n\t\treturn True\n\telse:\n\t\tmessage = \"file signatures do not match, rolling back {0} -> {1}\".format(backupFileName, dest)\n\t\tlogger.error(message)\n\t\n\t#roll back file\n\ttry:\n\t\tshutil.move(backupFileName, dest)\n\texcept IOError as errorMessage:\n\t\tlogger.error(errorMessage)\n\t\treturn False\n\t\n\treturn True", "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def copy_file_check(self):\n pass", "def svn_fs_file_checksum(*args):\r\n return _fs.svn_fs_file_checksum(*args)", "def __copyfile2(source, destination):\n logger.info(\"copyfile2: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy2(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile2: %s -> %s failed! 
Error: %s\", source, destination, e\n )\n return False", "def copy_file(source, destination):\n\n try:\n shutil.copy(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True", "def test_managed_local_source_with_source_hash(\n file, tmp_path, grail_scene33_file, grail_scene33_file_hash, proto, dest_file_exists\n):\n name = tmp_path / \"local_source_with_source_hash\"\n\n if dest_file_exists:\n name.touch()\n\n # Test with wrong hash\n bad_hash = grail_scene33_file_hash[::-1]\n\n ret = file.managed(\n name=str(name),\n source=proto + str(grail_scene33_file),\n source_hash=\"sha256={}\".format(bad_hash),\n )\n assert ret.result is False\n assert not ret.changes\n assert \"does not match actual checksum\" in ret.comment\n\n # Now with the right hash\n ret = file.managed(\n name=str(name),\n source=proto + str(grail_scene33_file),\n source_hash=\"sha256={}\".format(grail_scene33_file_hash),\n )\n assert ret.result is True", "def copy(src_file_name, target_folder, file_name):\n\n global CURRENT_PROGRESS\n global TOTAL_FILE_NUM\n CURRENT_PROGRESS += 1\n\n mkdir(target_folder)\n target_file = os.path.join(target_folder, file_name)\n\n if os.path.exists(target_file):\n src_md5 = md5(src_file_name)\n\n # iterate target folder to check if file already exists\n for file_in_target in os.listdir(target_folder):\n entry = os.path.join(target_folder, file_in_target)\n target_md5 = md5(entry)\n if src_md5 == target_md5:\n log(\"(\" + str(CURRENT_PROGRESS) + \"/\" + str(TOTAL_FILE_NUM) + file_name + \"file exists, ignore COPY. <-- \" + src_file_name)\n return\n\n word_list = file_name.split('.')\n num_of_files = len(\n [f for f in os.listdir(target_folder) \\\n if os.path.isfile(os.path.join(target_folder, f))])\n file_name = word_list[0] + '(' + str(num_of_files) + ').' 
+ word_list[1]\n target_file = os.path.join(target_folder, file_name)\n\n if DELETE_AFTER_COPY:\n log(\"(\" + str(CURRENT_PROGRESS) + '/' + str(TOTAL_FILE_NUM) + \")MOVE: \" + src_file_name + \" --->\" + target_file)\n shutil.move(src_file_name, target_file)\n else:\n log(\"(\" + str(CURRENT_PROGRESS) + '/' + str(TOTAL_FILE_NUM) + \")COPY: \" + src_file_name + \" --->\" + target_file)\n shutil.copy(src_file_name, target_file)", "def download_and_validate_checksum(name, checksum):\n dst = os.path.join(DOWNLOADS_DIR, os.path.basename(name))\n download_file(src=name, dst=dst)\n md5 = hashlib.md5()\n for chunk in chunked_reader(dst):\n md5.update(chunk)\n dl_checksum = md5.digest().hex()\n if dl_checksum != checksum:\n raise ValueError(f\"expected checksum {checksum} but received {dl_checksum}\")\n os.remove(dst)", "def _copy_if_not_exists(source: pl.Path, destination: pl.Path) -> None:\n if destination.is_dir():\n destination_file = destination / source.name\n else:\n destination_file = destination\n if not destination_file.exists():\n su.copy(source, destination)", "def sync_file(source, dest, dryrun=False, diff=False):\n if diff:\n if not exists(dest):\n logger.info((\"Destination '{}' does not exist:\".format(dest),\n \" skipping diff\"))\n return\n with open(source) as a:\n with open(dest) as b:\n s1 = a.readlines()\n s2 = b.readlines()\n sys.stdout.writelines(\n difflib.unified_diff(s1, s2, fromfile=source, tofile=dest))\n return\n if not exists(dest):\n if dryrun:\n logger.info(\"DRY_RUN: Copying rule '{}' to '{}'\".format(\n source, dest))\n else:\n if not exists(dirname(dest)):\n os.makedirs(dirname(dest))\n logger.info(\"Copying rule '{}' to '{}'\".format(source, dest))\n shutil.copy2(source, dest)\n else:\n equal = filecmp.cmp(source, dest)\n if (not equal):\n if dryrun:\n logger.info(\"DRY_RUN: Updating rule '{}' to '{}'\".format(\n source, dest))\n else:\n logger.info(\"Updating rule '{}' to '{}'\".format(source, dest))\n shutil.copy2(source, dest)\n else:\n if dryrun:\n logger.info(\"DRY_RUN: rule '{}' up to date\".format(dest))\n else:\n logger.info(\"rule '{}' up to date\".format(dest))", "def copyFile(source,destination):\r\n logging.info(\"source\",source)\r\n logging.info(\"destination\",destination)\r\n try:\r\n shutil.copy(source, destination)\r\n logging.info(\"File copied successfully.\")\r\n \"\"\"If source and destination are same\"\"\"\r\n except shutil.SameFileError:\r\n logging.info(\"File not copied sucessfuly.\")\r\n \"\"\"List files and directories\"\"\"\r\n logging.info(\"After copying file:\")\r\n logging.info(os.listdir(destination))\r\n \"\"\"logging.info path of newly\r\n created file\"\"\"\r\n logging.info(\"Destination path:\", destination)", "def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file", "def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file", "def check_file_transferred(replica, location):\n\n from tardis.tardis_portal.models import Dataset_File\n datafile = Dataset_File.objects.get(pk=replica.datafile.id)\n\n # If the remote is capable, get it to send us the checksums and / or\n # file length for its copy of the file\n try:\n # Fetch the remote's metadata for the file\n m = 
location.provider.get_metadata(replica)\n _check_attribute(m, datafile.size, 'length')\n if (_check_attribute(m, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute(m, datafile.md5sum, 'md5sum')):\n return True\n if location.trust_length and \\\n _check_attribute(m, datafile.size, 'length') :\n return False\n raise MigrationError('Not enough metadata for verification')\n except NotImplementedError:\n pass\n except HTTPError as e:\n # Bad request means that the remote didn't recognize the query\n if e.code != 400:\n raise\n\n if location.provider.trust_length :\n try:\n length = location.provider.get_length(replica)\n if _check_attribute2(length, datafile.size, 'length'):\n return False\n except NotImplementedError:\n pass\n\n # Fetch back the remote file and verify it locally.\n f = location.provider.get_opener(replica)()\n md5sum, sha512sum, size, x = generate_file_checksums(f, None)\n _check_attribute2(str(size), datafile.size, 'length')\n if _check_attribute2(sha512sum, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute2(md5sum, datafile.md5sum, 'md5sum'):\n return True\n raise MigrationError('Not enough metadata for file verification')", "def copy_single_file(src, dest, log):\n if op.exists(src) and op.isfile(src):\n shellutil.copy(src, dest, log=log)\n return True", "def checksumFile(filename):\n return md5File(filename)", "def _check_source (fileurl, path_unzip, outfile) :\n if outfile is not None and os.path.splitext (outfile)[1].lower () == os.path.splitext (fileurl)[1].lower () :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = outfile)\n return file\n else :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = None)\n txt = _check_zip_file (file, path_unzip = path_unzip, outfile = outfile)\n if not os.path.exists (txt):\n message = \"hal_core._check_source: unable to find file \" + txt + \" source (\" + fileurl + \")\"\n raise PQHException (message)\n return txt", "def checkExisting(self, dst):\n if dst.exists():\n msg = 'Refusing to clobber existing file \"%s\"' % (\n dst.path,)\n logging.msg(msg)\n raise errors.NoClobber(msg)", "def _rsync(self, source, dest):\n \n #print(source)\n \n # Test SSH connection.\n if not self._ssh('test 1 -eq 1', use_pwd=False):\n print \"Waiting for SSH on %s with key %s\" % (self.address[0], self.key_file)\n time.sleep(1)\n while not self._ssh('test 1 -eq 1', use_pwd=False):\n time.sleep(1)\n\n # Archive, compress, delete extraneous files from dest dirs.\n rsync = ['rsync', '-az', '--delete']\n\n # Use key file\n if self.key_file:\n ssh = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -i \"%s\"'\n rsync.extend(['-e', ssh % self.key_file])\n\n if isinstance(source, list):\n rsync.extend(source)\n rsync.append(dest)\n else:\n rsync.extend([source, dest])\n \n print 'Sync files from %s to %s...' 
% (source, dest)\n \n if subprocess.call(rsync) == 0:\n return True\n else:\n return False", "def run(self):\n try:\n self.parse_args(None)\n self.execute_command()\n except FileExistsException, e:\n print \"Can't copy file as destination already exists.\"\n print \"Exiting...\"\n except Exception, e:\n print \"Exception occured: %s\\nExiting...\" % e", "def _copy_file ( self, source, dest ):\n return", "def copy_file(src, dst, ignore=None):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Copying file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Copying file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Copying file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Copying file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if ignore is not None: files = [fil for fil in files if not ignore in fil]\n if len(files) != 0:\n debug.log(\"Copying File(s)...\", \"Copy from %s\"%src, \"to %s\"%dst) #DEBUG\n for file_ in files:\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Copying file: %s\"%file_) #DEBUG\n shutil.copy(file_, dst)\n else:\n debug.log(\"Error: Copying file failed. %s is not a regular file!\"%file_) #DEBUG\n else: debug.log(\"Error: Copying file failed. No files were found! 
(%s)\"%src) #DEBUG", "def copyFile(src_file, dst_file, race_flag):\n\n if race_flag:\n time.sleep( random.randint(5, random.randint(10, 50)) )\n if os.path.exists(dst_file):\n while not sameSize(src_file, dst_file):\n time.sleep(10)\n return None\n\n while(True):\n shutil.copy(src_file, dst_file)\n if sameSize(src_file, dst_file):\n break", "def verify_destination(self, destination):\n # Make sure the text file was copied to the destination.\n text_file = os.path.join(destination, 'notes.txt')\n assert os.path.isfile(text_file)\n with open(text_file) as handle:\n assert handle.read() == \"This file should be included in the backup.\\n\"\n # Make sure the subdirectory was copied to the destination.\n subdirectory = os.path.join(destination, 'subdirectory')\n assert os.path.isdir(subdirectory)\n # Make sure the symbolic link was copied to the destination.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n assert os.path.islink(symlink)", "def test_source_package_checksum_changes(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n first_checksum = response.headers.get('ETag')\n self.assertIsNotNone(first_checksum)\n\n response = self.client.post(\n f'/filemanager/api/{self.upload_id}',\n data={'file': (io.BytesIO(b'foocontent'), 'foo.txt'),},\n headers={'Authorization': self.token},\n content_type='multipart/form-data'\n )\n\n second_checksum = response.headers.get('ETag')\n self.assertIsNotNone(second_checksum)\n self.assertNotEqual(first_checksum, second_checksum)\n\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n\n third_checksum = response.headers.get('ETag')\n self.assertIsNotNone(third_checksum)\n self.assertEqual(second_checksum, third_checksum)", "def copy(self):\n source = os.path.abspath(self.path)\n destination = os.path.abspath(self.target)\n\n logger.info(\"Running Copy Method - SOURCE=\\\"{src}\\\" DESTINATION=\\\"{dst}\\\" IGNORE=\\\"{ignore}\\\"\".format(src=source, dst=destination, ignore=self.ignore))\n\n if not os.path.exists(source):\n logger.error(\"\\\"{source}\\\" PATH DOESN'T EXIST. PROGRAM TERMINATED. Please check log file.\".format(source=source))\n\n if self.rules is not None:\n files = self.rules\n else:\n self.create_packet_structure(source)\n files = self.files\n\n for (k,v) in files.items():\n src = os.path.join(source,k)\n dst = os.path.join(destination,v)\n dirpath = os.path.dirname(dst)\n if not os.path.isdir(dirpath):\n logger.info(\"Create directory - \\\"{dst}\\\"\".format(dst=dirpath))\n os.makedirs(dirpath)\n logger.info(\"copy from \\\"{f}\\\" to \\\"{t}\\\"\".format(f=src,t=dst))\n shutil.copyfile(src,dst)\n logger.info(\"OK\")", "def CheckForDirFileConflict(self, src_uri, dst_path):\n final_dir = os.path.dirname(dst_path)\n if os.path.isfile(final_dir):\n raise CommandException('Cannot retrieve %s because it a file exists '\n 'where a directory needs to be created (%s).' %\n (src_uri, final_dir))\n if os.path.isdir(dst_path):\n raise CommandException('Cannot retrieve %s because a directory exists '\n '(%s) where the file needs to be created.' 
%\n (src_uri, dst_path))", "def verify_checksum(path):\n rc = True\n for f in os.listdir(path):\n if f.endswith('.md5'):\n cwd = os.getcwd()\n os.chdir(path)\n with open(os.devnull, \"w\") as fnull:\n try:\n subprocess.check_call(['md5sum', '-c', f], # pylint: disable=not-callable\n stdout=fnull, stderr=fnull)\n LOG.info(\"Checksum file is included and validated.\")\n except Exception as e:\n LOG.exception(e)\n rc = False\n finally:\n os.chdir(cwd)\n return rc\n LOG.info(\"Checksum file is not included, skipping validation.\")\n return rc", "def testDetermineDest(self):\n self.cc.determine_dest('cdl', '/bobsBestDirectory')\n\n dir = os.path.abspath('/bobsBestDirectory')\n filename = os.path.join(dir, 'uniqueId.cdl')\n\n self.assertEqual(\n filename,\n self.cc.file_out\n )", "def testChecksumCondition(self):\n file_defs = [\n {'name': 'file_0_byte.txt', 'path': '', 'size': 0},\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1},\n {'name': 'file_320k_minus 1_byte.txt', 'path': 'folder1', 'size': 320 * 1024 - 1, 'mod_inc': -1},\n ]\n\n self._setup_test_store(file_defs)\n\n # Check if this provider supports checksum as change condition\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n if drive.files_differ_on_hash(\n os.path.join(self.test_local_dir, file_defs[0]['path'], file_defs[0]['name']),\n 'dummy_hash') is None:\n self.skipTest('Checksum change condition not supported for this provider.')\n\n self._sync_drives()\n\n # Modify files\n for file_def in file_defs:\n file_path =\\\n os.path.join(self.test_local_dir, file_def['path'], file_def['name'])\n test_utils.make_random_file(\n file_path, file_def['size'], leave_existing=False,\n modify_timestamp_ns=os.stat(file_path).st_mtime_ns)\n\n self._sync_drives()\n self._download_store()\n self.assertDirectoriesAreEqual(self.test_local_dir, self.test_download_dir)", "def test_source_package_checksum_is_stable(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n first_checksum = response.headers.get('ETag')\n self.assertIsNotNone(first_checksum)\n self.assertEqual(first_checksum, self.original_checksum)\n\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n second_checksum = response.headers.get('ETag')\n self.assertEqual(first_checksum, second_checksum)\n\n response = self.client.get(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n third_checksum = response.headers.get('ETag')\n self.assertEqual(first_checksum, third_checksum)", "def test_success_from_bin():\n createFromBin(\"tests/vbaProject.bin\", \"src/data\", \"success_bin.xlam\")\n # Assert that xlam file is created\n assert exists(\"success_bin.xlam\")\n #assert that bin file within success_bin.xlam matches tests/vbaProject.bin\n extractBinFromZip(\"success_bin.xlam\")\n md5hasher = FileHash('md5')\n assert md5hasher.hash_file(\"tests/vbaProject.bin\") == md5hasher.hash_file(\"xl/vbaProject.bin\")\n\n createFromZip(\"success_bin.xlam\", \"src/data\", \"success_xlam.xlam\")\n assert exists(\"success_xlam.xlam\")\n #assert that bin file within success_xlam.xlam matches bin file within success_bin.xlam\n extractBinFromZip(\"success_xlam.xlam\")\n assert md5hasher.hash_file(\"tests/vbaProject.bin\") == md5hasher.hash_file(\"xl/vbaProject.bin\")", "def compare(src, dest):\n xsrc, xdest = os.path.exists(src), os.path.exists(dest)\n if not xsrc:\n return Cmp.nosrc\n 
if not xdest:\n return Cmp.nodest\n with open(src, \"rb\") as s:\n csrc = sha256(s.read()).digest()\n if xdest:\n with open(dest, \"rb\") as d:\n cdest = sha256(d.read()).digest()\n else:\n cdest = b\"\"\n if csrc == cdest:\n return Cmp.same\n return Cmp.differ", "def pull_file(self, source, target):\n if not source or not target:\n raise ValueError\n try:\n subprocess.check_call(self.command + [\"pull\", source, target])\n except subprocess.CalledProcessError as e:\n self.log.warning(e)\n self.log.warning(\"Failed to copy \\\"%s:%s\\\" to %s\",\n self.name, source, target)\n raise ValueError\n else:\n self.log.debug(\"Copied \\\"%s:%s\\\" to \\\"%s\\\"\",\n self.name, source, target)", "def copy_if_needed(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n #console(f'COPY {src} --> {dst}')\n if os.path.isdir(src):\n return copy_dir(src, dst, filter)\n else:\n return copy_file(src, dst, filter)", "def copy_file(src, dest):\n logger.debug(\"Copying %s to %s\", src, dest)\n try:\n shutil.copy(src, dest)\n except (OSError, IOError) as exc:\n logger.debug('Installation error, trying sudo.')\n try:\n check_call(['sudo', 'cp', src, dest])\n except HelperError:\n # That failed too - re-raise the original exception\n raise exc\n return True", "def checksum_file(vm_name, file_name, vm_executor=None):\n command = config.MD5SUM_CMD % file_name\n if not vm_executor:\n vm_executor = get_vm_executor(vm_name)\n return vm_executor.run_cmd(shlex.split(command))[1]", "def test_checksum(size1, size2, lines, tmpdir):\n fp = tmpdir.join(\"temp-data.txt\").strpath\n data = \"\\n\".join(lines)\n with open(fp, 'w') as f:\n f.write(data)\n exp = hashlib.new(\"md5\", data.encode(\"utf-8\")).hexdigest()\n res1 = checksum(fp, size1)\n res2 = checksum(fp, size2)\n assert exp == res1\n assert res1 == res2\n assert res2 == exp", "def main():\n parser = CustomArgumentParser()\n parser.add_argument(\"-s\", \"--simon-sez\",\n help=\"Really, Simon sez copy the data!\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--src-directory\",\n help=\"Copy metadata from files in this directory.\")\n parser.add_argument(\"-d\", \"--dst-directory\",\n help=\"Copy metadata to matching files in this directory.\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Log level to DEBUG.\",\n action=\"store_true\")\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n error = False\n\n # Require these two arguments.\n for arg in [args.src_directory, args.dst_directory]:\n if not arg:\n logger.error(\n \"Required src or dst directory parameter missing.\")\n error = True\n # XXX: Duplicates exit below. 
Can't check directory if null.\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n\n if (os.path.exists(args.src_directory) and\n os.path.isdir(args.src_directory)):\n src_directory = args.src_directory\n else:\n logger.error(\n \"--src-directory={} does not exist or is not a directory.\".format(\n args.dst_directory))\n error = True\n\n if (os.path.exists(args.dst_directory) and\n os.path.isdir(args.dst_directory)):\n dst_directory = args.dst_directory\n else:\n logger.error(\n \"--dst-directory={} does not exist or is not a directory.\".format(\n args.dst_directory))\n error = True\n\n if error:\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n else:\n process_all_files(src_directory, dst_directory, simon_sez=args.simon_sez)", "def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)", "def copy_file(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n if _passes_filter(src, filter):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if _should_copy(src, dst):\n #console(f'copy {src}\\n --> {dst}')\n shutil.copyfile(src, dst, follow_symlinks=True)\n shutil.copystat(src, dst, follow_symlinks=True)\n return True\n return False", "def FileSendRemoteChecksum(self, source_paths: list):\n try:\n paths = [p for pat in source_paths for p in self.expandPath(pat)]\n g = self.fileChunkGenerator(paths, False)\n return {c.path: c.sum for c in self.filemanager.SendChecksum(g)}\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e", "def compute_checksum(filename):\n cmd = 'md5sum ' + filename\n return pipe(cmd)", "def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return", "def check(src, dst):\n walker = Walker()\n walker.check(src, dst)\n return", "def copy_file(source_file, target_file):\n\t# print('\\n\\nCopying [{}] to [{}].\\n\\n'.format(source_file, target_file))\n\trun_rsync([source_file, target_file])", "def copy(self, source_host, dest_host, filename):", "def determine_should_sync(\n self, src_file: Optional[FileStats], dest_file: Optional[FileStats]\n ) -> bool:\n if dest_file:\n dest_file.operation_name = \"delete\"\n LOGGER.debug(\n \"syncing: (None) -> %s (remove), file does not \"\n \"exist at source (%s) and delete mode enabled\",\n dest_file.src if dest_file else None,\n dest_file.dest if dest_file else None,\n )\n return True", "def has_checksum_file(self):\n return self.checksum_file_path.is_file()", "def md5sum(fileSrc):\n md5 = hashlib.md5()\n try:\n with open(fileSrc, \"rb\") as fd:\n while True:\n content = fd.read(2**20)\n if not content:\n break\n md5.update(content)\n except IOError:\n print(fileSrc + \" Not found\")\n exit(1)\n return md5.hexdigest()", "def putFile( self, path, sourceSize = 0 ):\n res = checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n for dest_url, 
src_file in urls.items():\n gLogger.debug( \"DIPStorage.putFile: Executing transfer of %s to %s\" % ( src_file, dest_url ) )\n res = self.__putFile( src_file, dest_url )\n if res['OK']:\n successful[dest_url] = res['Value']\n else:\n failed[dest_url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def alock_installFunc(dest, source, env):\n\n owner = env.get('INSTALL_OWNER', None)\n if owner:\n try:\n uid = pwd.getpwnam(owner)[2]\n except TypeError:\n uid = owner\n else:\n uid = -1\n\n group = env.get('INSTALL_GROUP', None)\n if group:\n try:\n gid = grp.getgrnam(group)[2]\n except TypeError:\n gid = group\n else:\n gid = -1\n\n mode = env.get('INSTALL_MODE', None)\n if not mode:\n st = os.stat(source)\n mode = (stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)\n if isinstance(mode, str):\n mode = int(mode, 8)\n\n shutil.copy2(source, dest)\n\n if owner or group:\n os.chown(dest, uid, gid)\n\n os.chmod(dest, mode)\n return 0", "def assert_sources_folder_exist(command: commands.FilesRelatedCommand,\n filesystem: infra.Filesystem,\n stdout: infra.STDOut) -> None:\n if filesystem.not_exists(command.sources_folder):\n stdout.red(f'Sources folder does not exist: {command.sources_folder}')\n sys.exit(1)", "def make_checksum_file(self, project):\n return None", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def copy_file(name, n_name):\n\n if os.path.isfile(config_tools.full_dest+name):\n try:\n shutil.copyfile(config_tools.full_dest+name, config_tools.full_dest+n_name)\n except OSError:\n print(f\"Не возможно копировать файл {name}\")\n else:\n print(f\"Файл {config_tools.full_dest+name} скопирован как {config_tools.full_dest+n_name}\")", "def check_exists(self, name):\n if self.pyload.config.get(\"download\", \"skip_existing\"):\n download_folder = self.pyload.config.get(\n 'general', 'download_folder')\n dest_file = fsjoin(download_folder,\n self.pyfile.package().folder if self.pyload.config.get(\n \"general\", \"folder_per_package\") else \"\",\n name)\n if exists(dest_file):\n self.pyfile.name = name\n self.skip(_(\"File exists.\"))", "def checksum_test(self, attack_args, sha256_checksum, seed=5, cleanup=True, pcap=Lib.test_pcap,\n flag_write_file=False, flag_recalculate_stats=False, flag_print_statistics=False,\n attack_sub_dir=True, test_sub_dir=True, time=False):\n\n controller = Ctrl.Controller(pcap_file_path=pcap, do_extra_tests=False, non_verbose=True)\n controller.load_pcap_statistics(flag_write_file, flag_recalculate_stats, flag_print_statistics,\n intervals=[], delete=True)\n\n controller.process_attacks(attack_args, [[seed]], time)\n\n caller_function = inspect.stack()[1].function\n\n try:\n self.assertEqual(sha256_checksum, Lib.get_sha256(controller.pcap_dest_path))\n except self.failureException:\n Lib.rename_test_result_files(controller, caller_function, attack_sub_dir, test_sub_dir)\n raise\n\n if cleanup:\n Lib.clean_up(controller)\n else:\n Lib.rename_test_result_files(controller, caller_function, attack_sub_dir, test_sub_dir)", "def _get_checksum(self, arg):", "def test_file_integrity_remove_file_in_case_of_fail():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n test_file_path = 
os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert not os.path.isfile(test_file_path)", "def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))", "def need_checksum(self):\n if self.skip_checksum:\n log.warning(\"Skip checksum because --skip-checksum is specified\")\n return False\n # There's no point running a checksum compare for selective dump\n if self.where:\n log.warning(\"Skip checksum because --where is given\")\n return False\n # If the collation of primary key column has been changed, then\n # it's high possible that the checksum will mis-match, because\n # the returning sequence after order by primary key may be vary\n # for different collations\n for pri_column in self._pk_for_filter:\n old_column_tmp = [\n col for col in self._old_table.column_list if col.name == pri_column\n ]\n if old_column_tmp:\n old_column = old_column_tmp[0]\n new_column_tmp = [\n col for col in self._new_table.column_list if col.name == pri_column\n ]\n if new_column_tmp:\n new_column = new_column_tmp[0]\n if old_column and new_column:\n if not is_equal(old_column.collate, new_column.collate):\n log.warning(\n \"Collation of primary key column {} has been \"\n \"changed. Skip checksum \".format(old_column.name)\n )\n return False\n # There's no way we can run checksum by chunk if the primary key cannot\n # be covered by any index of the new schema\n if not self.validate_post_alter_pk():\n if self.skip_pk_coverage_check:\n log.warning(\n \"Skipping checksuming because there's no unique index \"\n \"in new table schema can perfectly cover old primary key \"\n \"combination for search\".format(old_column.name)\n )\n return False\n else:\n # Though we have enough coverage for primary key doesn't\n # necessarily mean we can use it for checksum, it has to be an\n # unique index as well. 
Skip checksum if there's no such index\n if not self.find_coverage_index():\n log.warning(\n \"Skipping checksuming because there's no unique index \"\n \"in new table schema can perfectly cover old primary key \"\n \"combination for search\".format(old_column.name)\n )\n return False\n return True", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def _verify_archive_equality(self, file1, file2):\r\n temp_dir_1 = mkdtemp()\r\n temp_dir_2 = mkdtemp()\r\n try:\r\n extract_source(file1, temp_dir_1)\r\n extract_source(file2, temp_dir_2)\r\n return directories_equal(temp_dir_1, temp_dir_2)\r\n\r\n finally:\r\n shutil.rmtree(temp_dir_1)\r\n shutil.rmtree(temp_dir_2)", "def do_compare(self, str_arg):\n arg = validateString(str_arg)\n source, target = arg.split(' ', 1)\n if os.path.isfile(source):\n # Mar 27 @swang: if target file doesn't exist, copy source file to setup directory for later test\n # 2015-08-27: decided to go to fail path\n if not os.path.isfile(target):\n # copy(source, target)\n self.resultFlag = False\n raise ValueError('COMPARE FAILED: target file not found.')\n # if not self.__compareImage(source, target):\n if not filecmp.cmp(source, target):\n printLog(self.threadName + 'COMPARE FAILED: source file and target file DIFFER!', logging.WARNING)\n self.resultFlag = False\n else:\n self.resultFlag = False\n raise ValueError('COMPARE FAILED: source file not found.')", "def process(self, source_path: pathlib.Path) -> bool:", "def run_copy(self, src, dst):\n pass", "def check(src, perm, dest, cmds, comp, verbose=False):\n if comp == Cmp.differ:\n ansiprint(f\"The file '{src}' differs from '{dest}'.\", fg=Color.red, i=True)\n elif comp == Cmp.nodest:\n ansiprint(\n f\"The destination file '{dest}' does not exist\",\n fg=Color.black,\n bg=Color.red,\n )\n elif comp == Cmp.nosrc:\n ansiprint(\n f\"The source file '{src}' does not exist.\", fg=Color.black, bg=Color.red\n )\n elif comp == Cmp.same and verbose:\n ansiprint(f\"The files '{src}' and '{dest}' are the same.\", fg=Color.green)", "def _copy_file(src, dest):\n\n if src is None or dest is None:\n raise ValueError(\"src and dest must not be None\", src, dest)\n\n if not os.path.isfile(src):\n raise ValueError(\"src file does not appear to exist\", src)\n\n # if error on copy, subprocess will raise CalledProcessError\n try:\n subprocess.run(\n [\"/usr/bin/ditto\", src, dest], check=True, stderr=subprocess.PIPE\n )\n except subprocess.CalledProcessError as e:\n logging.critical(\n f\"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}\"\n )\n raise e", "def move_file(src, dst):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Moving file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Moving file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Moving file failed. 
%s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Moving file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if len(files) != 0:\n debug.log(\"Moving File(s)...\", \"Move from %s\"%src, \"to %s\"%dst)\n for file_ in files:\n # Check if file contains invalid symbols:\n invalid_chars = re.findall('[^\\w/\\-\\.\\*]', os.path.basename(file_))\n if invalid_chars:\n debug.graceful_exit((\"Error: File %s contains invalid \"\n \"characters %s!\"\n )%(os.path.basename(file_), invalid_chars))\n continue\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Moving file: %s\"%file_)\n shutil.move(file_, dst)\n else:\n debug.log(\"Error: Moving file failed. %s is not a regular file!\"%file_)\n else: debug.log(\"Error: Moving file failed. No files were found! (%s)\"%src)", "def checksum(self, md5_file, file_name):\n try:\n with open(md5_file, 'r') as f:\n md5_file_contents = f.read()\n md5_str = md5_file_contents.split(' ')[0]\n os.remove(md5_file)\n except Exception as e:\n logging.exception('Could not read MD5 file {}. \\\n \\nTry to download the file again'.format(file_name))\n return False\n if not self.check_md5(file_name, md5_str):\n logging.error('Failed in checksum. Download the file again.')\n return False\n return True", "def copy(self, src_path: str, tgt_path: str) -> None:", "def testCheckSourceCopyOperation_Pass(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n self.assertIsNone(\n payload_checker._CheckSourceCopyOperation(None, 134, 134, 'foo'))", "def hash_file(self, file_path, file_arcname):\n\n file_path = os.path.abspath(file_path)\n\n # If the file_arcname argument is None use the base file name as the\n # arc name\n if file_arcname is None:\n file_arcname = os.path.basename(file_path)\n\n if not os.path.exists(file_path):\n task_error(\"%s doesn't exist\" % file_path)\n if not os.access(file_path, os.R_OK):\n task_error(\"Can't read from %s\" % file_path)\n\n file_mode = os.stat(file_path)[stat.ST_MODE]\n if not stat.S_ISDIR(file_mode) and not stat.S_ISREG(file_mode):\n task_error(\"Unknown file type for %s\" % file_path)\n\n file_in = None\n try:\n # open to read binary. 
This is important.\n file_in = open(file_path, 'rb')\n except IOError:\n task_error(\"Couldn't read from file: %s\" % file_path)\n\n # hash file 1Mb at a time\n hashval = hashlib.sha1()\n while True:\n data = file_in.read(1024 * 1024)\n if not data:\n break\n hashval.update(data)\n\n # update file bundle status\n\n self.running_size += len(data)\n\n self.percent_complete = 100.0 * self.running_size / self.bundle_size\n\n # only update significant progress\n if self.percent_complete - self.last_percent > 1:\n self.report_percent_complete()\n self.last_percent = self.percent_complete\n\n file_hash = hashval.hexdigest()\n\n # print 'hash: ' + file_hash\n file_in.close()\n\n modified_name = os.path.join('data', file_arcname)\n (file_dir, file_name) = os.path.split(modified_name)\n\n # linuxfy the directory\n file_dir = file_dir.replace('\\\\', '/')\n\n info = {}\n info['size'] = os.path.getsize(file_path)\n mime_type = mimetypes.guess_type(file_path, strict=True)[0]\n\n info['mimetype'] = mime_type if mime_type is not None else 'application/octet-stream'\n info['name'] = file_name\n info['mtime'] = DT.datetime.utcfromtimestamp(int(os.path.getmtime(file_path))).isoformat()\n info['ctime'] = DT.datetime.utcfromtimestamp(int(os.path.getctime(file_path))).isoformat()\n info['destinationTable'] = 'Files'\n info['subdir'] = file_dir\n info['hashsum'] = file_hash\n info['hashtype'] = 'sha1'\n\n # todo make sure errors bubble up without crashing\n if file_arcname in self.file_meta:\n print file_arcname\n task_error(\n \"Different file with the same arcname is already in the bundle\")\n return\n\n return info", "def evaluate(self):\n errors_found = 0\n\n # Test if destination exists and is a directory\n if not os.path.isdir(os.path.expandvars(self.path_destination)):\n message.alert(\n f\"FILE [{self.name}]: '{self.path_destination}' destination path is not a known directory.\"\n )\n errors_found += 1\n\n # Test if sudo is really needed if the file's\n # destination is in a directory owned by the current user\n if os.getenv(\"HOME\") in os.path.expandvars(self.path_destination) and self.sudo:\n message.alert(\n f\"FILE [{self.name}]: Sudo use may be unnecessary as {self.path_destination} is in your home path.\"\n )\n errors_found += 1\n\n # Test if source is a directory\n if self.path_source is not None:\n if not os.path.isdir(os.path.expandvars(self.path_source)):\n message.alert(\n f\"FILE [{self.name}]: '{self.path_source}' source path is not a known directory.\"\n )\n errors_found += 1\n\n if not os.path.isfile(\n os.path.join(os.path.expandvars(self.path_source), self.name)\n ):\n message.alert(\n f\"FILE [{self.name}] at PATH: [{self.path_source}] does not exist.\"\n )\n errors_found += 1\n\n # Check expected types\n if not isinstance(self.comments, list):\n message.alert(\n f\"FILE [{self.name}]: Type mismatch, comments attribute is of type '{type(self.comments)}' instead of 'list'\"\n )\n errors_found += 1\n\n return errors_found", "def copyFile(src, dest):\n try:\n shutil.copy(src,dest)\n except shutil.Error as e:\n print(\"Error: \" + str(e))\n except IOError as e:\n print(\"Error: \" + e.strerror)", "def source(dirname, filename, gen_content):\n if dirname in lut['sources']:\n s.add('MD5SUM=\"$(find \"{0}\" -printf %T@\\\\\\\\n | md5sum)\"', dirname)\n if secret is None:\n s.add('tar xf \"{0}\" -C \"{1}\"',\n filename,\n dirname,\n sources={filename: gen_content()})\n else:\n s.add('wget \"{0}/{1}/{2}/{3}\"', server, secret, b.name, filename)\n s.add('tar xf \"{0}\" -C \"{1}\"', filename, 
dirname)\n for manager, service in lut['sources'][dirname]:\n s.add('[ \"$MD5SUM\" != \"$(find \"{0}\" -printf %T@\\\\\\\\n ' # No ,\n '| md5sum)\" ] && {1}=1',\n dirname,\n manager.env_var(service))", "def get_checksum_subdir(download_cmd, dest):\n\n # If the download is not forced and there is a checksum, allow\n # checksum match to skip the download.\n download_cmd_digest = \"\"\n try:\n download_cmd_digest = hashlib.md5(download_cmd).hexdigest()\n except TypeError:\n download_cmd_digest = hashlib.md5(\n download_cmd.encode('utf-8')).hexdigest()\n\n try:\n dir_ls = os.listdir(\"%s\" % (dest))\n except (IOError, OSError) as e:\n # no subdir with downloaded agents found\n return (download_cmd_digest, True)\n\n if download_cmd_digest in dir_ls:\n return (download_cmd_digest, False)\n else:\n return (download_cmd_digest, True)", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def checksum_of(filepath):\n bfsz = 10240000 # 10 MB buffer\n sum = hashlib.sha256()\n with open(filepath) as fd:\n while True:\n buf = fd.read(bfsz)\n if not buf: break\n sum.update(buf)\n return sum.hexdigest()", "def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath", "def copyfile(src, dst, overwrite=False, changed_only=True, link=False):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if os.path.exists(dst) and not overwrite:\n return False\n if samefile(src, dst):\n return False\n try:\n if not os.path.exists(dst):\n dstdir = dirname(dst)\n if not os.path.exists(dstdir):\n os.makedirs(dstdir)\n else:\n # `dst` exists, check for changes\n if changed_only:\n sstat = os.stat(src)\n dstat = os.stat(dst)\n if (sstat.st_size ==\n dstat.st_size and sstat.st_mtime <= dstat.st_mtime):\n # same size and destination more recent, do not copy\n return False\n if link:\n try:\n os.link(src, dst)\n except OSError:\n # retry with normal copy\n shutil.copy2(src, dst)\n else:\n shutil.copy2(src, dst)\n except WindowsError:\n pass\n return True", "def hexists(file_path: str) -> bool:\n return os.path.exists(file_path)", "def copy_file ( self, source, dest, chown=True, chmod=True ):\n if self._copy_file ( source, dest ):\n if chmod:\n self.chmod_file ( dest )\n if chown:\n self.chown_file ( dest )\n\n return True\n else:\n return False", "def check_hash(self, fname, args):\n fobj = self._open_file(fname)\n\n rc = 0\n format_errors = 0\n hash_errors = 0\n read_errors = 0\n for idx, line in enumerate(fobj):\n # remove any newline characters\n m = self.CHECK_RE.match(line.strip())\n if not m:\n if args.warn:\n self.app.stderr.write(\n 'hasher {0}: {1}: {2}: improperly formatted {3}'\n ' checksum line\\n'.format(self.name, fname, idx + 1,\n self.name.upper()))\n format_errors += 1\n rc = 1\n continue\n hash_value, binary, check_file = m.groups()\n\n try:\n check_f = open(check_file, 'rb' if binary == '*' else 'r')\n except IOError:\n self.app.stderr.write(\n 'hasher {0}: {1}: No such file or directory\\n'.format(\n self.name, check_file))\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, READ_ERROR))\n read_errors += 1\n rc = 1\n continue\n\n if self._calculate_hash(check_f) == hash_value:\n if not (args.quiet or args.status):\n self.app.stdout.write(\n STATUS_MSG.format(check_file, SUCCESS))\n 
else:\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, HASH_ERROR))\n hash_errors += 1\n rc = 1\n\n if format_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} line{2} {3} improperly'\n ' formatted\\n'.format(\n self.name,\n format_errors,\n 's' if format_errors > 1 else '',\n 'are' if format_errors > 1 else 'is',\n ))\n if read_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} listed file{2}'\n ' could not be read\\n'.format(\n self.name,\n read_errors,\n 's' if read_errors > 1 else '',\n ))\n if hash_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} computed checksum{2}'\n ' did NOT match\\n'.format(\n self.name,\n hash_errors,\n 's' if hash_errors > 1 else '',\n ))\n return rc", "def rsync_and_md5(old_name, new_name, md5sum=None):\n if md5sum is None:\n md5sum = md5(old_name)\n\n syscall(\"rsync \" + old_name + \" \" + new_name)\n new_md5sum = md5(new_name)\n\n if new_md5sum != md5sum:\n raise Exception(\n \"Error copying file \"\n + old_name\n + \" -> \"\n + new_name\n + \"\\n. md5s do not match\"\n )\n else:\n return md5sum", "def _check_cow(image_dir):\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. 
Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)", "def checksum(self):\n checksums = {\n \"slug\": hashlib.sha256(\n self.slug.encode(\"utf-8\")\n ).hexdigest(),\n \"files\": {},\n }\n\n def file_hash(filepath):\n running_hash = hashlib.sha256()\n with open(filepath, \"rb\") as IN:\n while True:\n # Read file in as little chunks.\n buf = IN.read(4096)\n if not buf:\n break\n running_hash.update(buf)\n return running_hash.hexdigest()\n\n # iterate over the direcory and calucalte the hash\n for root, dirs, files in os.walk(self.thawed_dir):\n for file_path in sorted(files):\n full_path = str(Path(root) / file_path)\n # Calculate a relative path to the freezable object\n rel_path = full_path.replace(str(self.thawed_dir) + \"/\", \"\")\n # calculate and store the checksums\n phash = file_hash(full_path)\n filesize = os.path.getsize(full_path)\n checksums[\"files\"][rel_path] = {\n \"checksum\": phash,\n \"size\": filesize,\n }\n # calculate the total\n total = hashlib.sha256(checksums[\"slug\"].encode(\"utf-8\"))\n # Iterate over filenames AND hashes and update checksum\n for filename, data in checksums[\"files\"].items():\n total.update(filename.encode(\"utf-8\"))\n total.update(data[\"checksum\"].encode(\"utf-8\"))\n checksums[\"total\"] = total.hexdigest()\n return checksums", "def ExecuteIf(self, args, src_files, dst_files):\n if self.ShouldBuild(src_files, dst_files):\n self.MakeDestinationDirectories(dst_files)\n self.Execute(args)\n if self.execute and not self.VerifyExists(dst_files):\n raise RuntimeError(\"FAILED: build did not create all required files\")", "def validate_file_destination(namespace):\n try:\n path = namespace.destination\n except AttributeError:\n return\n else:\n # TODO: Need to confirm this logic...\n file_path = path\n file_dir = os.path.dirname(path)\n if os.path.isdir(path):\n file_name = os.path.basename(namespace.file_name)\n file_path = os.path.join(path, file_name)\n elif not os.path.isdir(file_dir):\n try:\n os.mkdir(file_dir)\n except EnvironmentError as exp:\n message = \"Directory {} does not exist, and cannot be created: {}\"\n raise ValueError(message.format(file_dir, exp))\n if os.path.isfile(file_path):\n raise ValueError(\"File {} already exists.\".format(file_path))\n namespace.destination = file_path", "def check_is_event_valid(self, event):\n if event.src_path == template_file_path:\n self.__init__()\n if not hasattr(event, 'dest_path'):\n event.dest_path = None\n for path in [event.src_path, event.dest_path]:\n if path is not None:\n dir, name = self.__parse_full_path(path)\n if dir.find(controlled_path\n ) >= 0 and not self._check_is_name_valid(name):\n if path == event.dest_path:\n os.system('cp {dest} {src}'.format(\n dest=event.dest_path, src=event.src_path))\n os.system('rm -rf {dir}{name}'.format(dir=dir, name=name))", "def test_nonexistent_path(tmpdir):\n with pytest.raises(IOError):\n checksum(tmpdir.join(\"does-not-exist.txt\").strpath)", "def file_copy(\n self,\n src: str,\n dest: Optional[str] = None,\n file_system: Optional[str] = None,\n peer: Optional[bool] = False,\n ) -> None:\n if dest is None:\n dest = os.path.basename(src)\n\n if file_system is None:\n file_system = self._get_file_system()\n\n # netmiko's enable_scp\n self.enable_scp()\n self._file_copy(src, dest, file_system)\n if peer:\n self.peer_device._file_copy(src, dest, 
file_system) # pylint: disable=protected-access\n\n # logging removed because it messes up unit test mock_basename.assert_not_called()\n # for tests test_file_copy_no_peer_pass_args, test_file_copy_include_peer\n # log.info(\"Host %s: File %s transferred successfully.\")" ]
[ "0.7140068", "0.6845625", "0.6302969", "0.6295187", "0.61806387", "0.61280686", "0.60687417", "0.59943986", "0.59558606", "0.5897827", "0.5882352", "0.58686715", "0.5864275", "0.58456916", "0.58347523", "0.58037543", "0.57591784", "0.5748481", "0.57282805", "0.57282805", "0.56951106", "0.56919247", "0.5664753", "0.5663705", "0.5628241", "0.5627122", "0.5625461", "0.55792797", "0.55756915", "0.5572036", "0.55552787", "0.5520113", "0.5513952", "0.5504636", "0.550185", "0.5499364", "0.5468387", "0.54509", "0.5427588", "0.54261225", "0.54187167", "0.54181033", "0.5415414", "0.5410967", "0.53951955", "0.5393025", "0.53927755", "0.5389868", "0.5373187", "0.5368536", "0.53505635", "0.5333802", "0.53254074", "0.532351", "0.53216743", "0.5303266", "0.5297884", "0.52941805", "0.52828693", "0.5272482", "0.5261681", "0.5260972", "0.52437663", "0.5243001", "0.5238974", "0.5230989", "0.5224485", "0.52192163", "0.5219067", "0.52122086", "0.5202346", "0.520157", "0.5196413", "0.5175625", "0.5169104", "0.51601756", "0.5159274", "0.5158185", "0.51555204", "0.51476115", "0.51441926", "0.5138477", "0.5135487", "0.51269555", "0.5122361", "0.51153195", "0.5109802", "0.51011354", "0.50976086", "0.50918806", "0.50898075", "0.50873893", "0.50828177", "0.508092", "0.507804", "0.5074106", "0.5073415", "0.5072035", "0.5064077", "0.5063574" ]
0.7357597
0
Establish a connection with the LabVIEW server. If the LabVIEW program is ever stopped and restarted (as it should be when not taking data, to avoid wearing out the shutter), this should be called to reestablish the connection.
def connect(self):
    self.sock = s.socket(s.AF_INET,s.SOCK_STREAM)
    self.sock.connect((self.remote_host, self.remote_port))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self):\n self.class_logger.info(\"Performing connection to TRex server via HLT API\")\n self.check_res(self.hltapi.connect(device=self.host, port_list=self.ports, reset=True, break_locks=True))", "def connect_to_server(self):\n\n server=os.popen('hostname').read()\n if 'epfl.ch' not in server:\n conn = mds.Connection('tcvdata.epfl.ch')\n conn.openTree('tcv_shot', self.shot)\n self.tree = conn\n print(\"You are in server \"+server+\", so I'll open a connection\")\n else:\n self.tree = mds.Tree('tcv_shot', self.shot)", "def connect(self):\n\n self.tello.connect()\n self.tello.wait_for_connection(60.0)", "def _establish_connection(self):\n self.conn = self.listener.accept()", "def connect(self):\n self.conn.connect()", "def connect(self):\n\t\tself._entity_server_connection.attempt_connection()", "def attempt_connection(self):\n self.host = self.hostBox.text()\n self.port = self.portBox.text()\n self.log_name = str(self.logBox.currentText())\n self.log_num = self.get_log_number(self.log_name)\n self.mode = str(self.modeBox.currentText())\n if not self.log_mode_programmable():\n self.incomp_mode_err = QErrorMessage()\n self.incomp_mode_err.showMessage('Incompatible mode: can only '\n + 'program historical logs 1, 2, '\n + 'and 3')\n return\n while True:\n try:\n self.client = client.connect(self.host, self.port)\n self.connected = True\n break\n except Exception as e:\n self.connection_failed_err = QErrorMessage()\n self.connection_failed_err.showMessage('Failed to connect to'\n + ' meter')\n return\n self.parent.host = self.host\n self.parent.port = self.port\n self.parent.log_name = self.log_name\n self.parent.log_num = self.log_num\n self.parent.client = self.client\n self.parent.connected = self.connected\n # we assign these here because we need to create the client before\n # some of the functionality of these views works\n self.parent.programLogView = plv.ProgramLogWidget(self)\n self.parent.retrieveLogView = rlv.RetrieveLogWidget(self)\n self.parent.stackedWidget.addWidget(self.parent.retrieveLogView)\n self.parent.stackedWidget.addWidget(self.parent.programLogView)\n self.parent.updateMode(self.mode)\n self.parent.update_statusbar()\n\n self.close()", "def connectToServer(self):\n self.client = Client(base_url = self.server)\n self.ping()", "def connectTvdb(self,host):\n if None==self.tvdb:\n log(\"connectTvdb: \"+host)\n self.tvdb = http.client.HTTPConnection(host,timeout=30)", "def connect():", "def _connect(self):\n\n # Get the timeout\n m_timeout = OMPv4.TIMEOUT\n if self.__timeout:\n m_timeout = self.__timeout\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(m_timeout)\n try:\n sock.connect((self.__host, int(self.__port)))\n except socket.error, e:\n raise ServerError(str(e))\n self.socket = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)\n\n # Authenticate to the server\n self._authenticate(self.__username, self.__password)", "def connect_to_server(self):\n \n server.setserver(\n ip=self.config.get('Network', 'ip'),\n port=self.config.getint('Network', 'port')\n )\n self._gui_server = server.connect(self)", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server 
bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def attempt_to_connect(self):\n if self.server_handler.attempt_connection:\n self.server_handler.attempt_connection = False\n else:\n self.server_handler.attempt_connection = True", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def connect(self):\n #print(\"try to connect connect\")\n if self._loop is not None and not self._loop.ready():\n #print(\"RE\")\n raise RuntimeError(\"Already (auto-re)connecting\")\n self._loop = gevent.spawn(self._run)", "def connect(self):\n self.conn.add_listener(self.handle_connection_change)\n self.conn.start_async()", "def connect(self):\n\n self.wm = telnetlib.Telnet(self.ip, self.port, self.timeout)\n time.sleep(2)\n print self.wm.read_very_eager() #clears connection message\n self.measure_chan()", "def connect(self):\n\t\tpass", "def connect_to_master():", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "def connect(self, reconnect=True, *args, **kwargs):\n pass", "def connect(self):\n self.sock = socket.socket()\n self.sock.connect(self.addr)\n self.send(\"PASS {}\".format(self.password))\n self.send(\"NICK {}\".format(self.nick))\n self.send(\"JOIN {}\".format(self.channel))\n self.send(\"CAP REQ :twitch.tv/tags\")\n self.is_connected = True\n # TODO: error handling", "def connect(self):\n from labrad.wrappers import connectAsync\n self.cxn = yield connectAsync(name='Protection_Beam_Server')\n self.arduino = self.cxn.arduinottl\n self.pmt = self.cxn.normalpmtflow\n self.enable_protection_shutter(self, self.enable_shutter)\n self.setupListeners()", "def connect():\n if not is_notebook():\n print('Python session is not running in a Notebook Kernel')\n return\n\n global _comm\n\n kernel = get_ipython().kernel\n kernel.comm_manager.register_target('tdb', handle_comm_opened)\n # initiate connection to frontend.\n _comm = Comm(target_name='tdb', data={})\n # bind recv handler\n _comm.on_msg(None)", "def connect_to_ibkr(self):\n\n self.update_console(\"Reporting connection to the server...\")\n print(\"Reporting connection to the server...\")\n result = report_login_to_server(self.settings)\n self.update_console(result)\n connector = Worker(self.ibkrworker.prepare_and_connect)\n connector.signals.result.connect(self.connection_done)\n connector.signals.status.connect(self.update_status)\n connector.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(connector)", "def connect(self) -> None:\n self.terminate()\n self._new_client().connect(\n hostname=self.ip,\n port=self.port,\n username=self.username,\n password=self.password,\n look_for_keys=False,\n allow_agent=False)", "def run(self):\n self.connect()", "def connectToServer(self):\r\n\t\tself.rtspSocket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ttry:\r\n\t\t\tself.rtspSocket_client.connect((self.serverAddr, self.serverPort))\r\n\t\texcept:\r\n\t\t\tprint(\"Fail to connect to server\")", "def establish_connection(self):\n print('Listening...')\n self.socket.listen()\n self.conn, addr = self.socket.accept()\n print('Received connection', addr)", "async def connect(self):\n pass", "def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable", "def maintainConnection():\n return 
RoboCaller().call(\"maintainConnection\", \"void\")", "async def _connect(self):\n pass", "def _connect(self):\n if self._is_running:\n return\n\n # Create thread for receiving motion capture data\n self._data_thread = DataThread(self._adapter, self._local_ip, self._multicast_ip, self._data_port)\n self._data_thread.daemon = True\n self._data_thread.start()\n\n # Create thread for sending commands and receiving result\n self._command_thread = CommandThread(self._adapter, self._server_ip, self._command_port)\n self._command_thread.daemon = True\n self._command_thread.start()\n\n self._is_running = True", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "async def connect(self):\n raise NotImplementedError", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "async def connect(self):\n try:\n self._cmd_stream = await self._connect()\n self.inc_counter(\"%s.connected\" % self.objname)\n self.logger.info(\"Connected: %s\", self._extra_info)\n except Exception as e:\n self.logger.error(\"Connect Failed %r\", e)\n self.inc_counter(\"%s.failed\" % self.objname)\n raise e", "def __init__(self):\n self.try_to_connect()", "def connectionMade(self):\n protocol.Protocol.connectionMade(self)\n self.port = self.transport.getHost().port\n #Start the inactivity timer the connection is dropped if we receive no data\n self.activateInactivityTimer()\n self.sessionState = SMPPSessionStates.OPEN\n self.log.warning(\"SMPP connection established from %s to port %s\", self.transport.getPeer().host, self.port)", "def connect(self):\n self.engine = create_engine(self.connection_string)\n self.conn = self.engine.connect()\n self.connected = True", "def connect(self, host, port):\n pass", "def OpenConnection(self):\n if self._master_connection_open or self.local:\n return\n # Establish master SSH connection using ControlPersist.\n with open(os.devnull, 'w') as devnull:\n subprocess.call(\n self.FormSSHCommandLine(['-M', '-o ControlPersist=yes']),\n stdin=devnull,\n stdout=devnull,\n stderr=devnull)\n self._master_connection_open = True", "def OpenConnection(self):\n if self._master_connection_open or self.local:\n return\n # Establish master SSH connection using ControlPersist.\n with open(os.devnull, 'w') as devnull:\n subprocess.call(\n self.FormSSHCommandLine(['-M', '-o ControlPersist=yes']),\n stdin=devnull,\n stdout=devnull,\n stderr=devnull)\n self._master_connection_open = True", "def initialize(self):\n if not self.connection.is_closed():\n self.connection.close()\n\n self.connection.connect()", "def connect(self):\n\n print(\"Connecting to server at {}:{}\".format(self.hostname, self.port))\n\n self._sock = socket.socket()\n self._sock.setblocking(True)\n self._sock.connect((self.hostname, self.port))\n self._sockfile = self._sock.makefile(encoding=\"utf-8\")\n self._connected = True\n\n if self.password:\n self._sendmsg(\"PASS :{}\".format(self.password))\n self._sendmsg(\"NICK {}\".format(self.nickname))\n self._sendmsg(\"USER {} 0 * :ORE Utility Bot\".format(getpass.getuser()))\n if self.ident_password:\n self._sendmsg(\"PRIVMSG NickServ :identify {}\".format(\n self.ident_password))\n self._sendmsg(\"JOIN {}\".format(\",\".join(self.channels)))", "def connect(self) -> None:", "def on_open(self):\n\n # TODO: Need logging, request timeout and exception handling down there:\n self.log.debug(\"Connecting 
to Neo4j.\")\n self._neo4j = Neo4j(host=self.config.host, port=self.config.port)\n self.log.status(\"Connected to Neo4j on %s:%d.\" % (self.config.host, self.config.port))", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def connect(self):\n if not self.is_connected:\n self._init_cec_connection()", "def connect(self):\n pass", "def reconnect(self):\n self.close()\n self.connect()", "def pedrpc_connect(self):\n # If the process monitor is alive, set it's options\n if self.procmon:\n while 1:\n if self.procmon.alive():\n break\n\n time.sleep(1)\n\n # connection established.\n for key, value in self.procmon_options.items():\n getattr(self.procmon, 'set_{0}'.format(key))(value)\n\n # If the network monitor is alive, set it's options\n if self.netmon:\n while 1:\n if self.netmon.alive():\n break\n\n time.sleep(1)\n\n # connection established.\n for key in self.netmon_options.keys():\n eval('self.netmon.set_%s(self.netmon_options[\"%s\"])' % (key, key))", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def connect_to_server(self):\n\n\t\tself.__logs.append('-- connecting to server ...')\n\t\tself.connect_to_ssh_server.emit(self.__ip.text(), int(self.__port.text()),\\\n\t\t\tself.__username.text(), self.__pwd.text())", "def connect(self) -> None:\n ...", "def init_conn(self):\n \n SERVER_ADDRESS = '192.168.0.21'\n PORT = 8018\n SERVER_PASSWORD = \"biratkingofcomedy\" \n connected = False\n \n # check if test module is being run\n if self.testing == 'n': \n while not connected:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n try:\n self.socket.connect((SERVER_ADDRESS, PORT))\n \n # server verification\n self.socket.sendall(self.make_packet(\"DATA\", SERVER_PASSWORD))\n \n response = self.socket.recv(4096)\n \n if response:\n response_hdr, response_msg, response_sdr = self.parse_packet(response)\n \n if response_hdr == \"ERROR\" and response_msg == \"IDENTIFY FAILED\":\n raise Exception(\"PASSWORD FAIL\")\n \n elif response_hdr == \"DATA\" and response_msg == \"CONNECTED\":\n connected = True\n \n else:\n raise Exception(\"CONNECTION FAIL\") \n \n except Exception as e:\n if e == \"PASSWORD FAIL\":\n print(\"DEBUG: server connection failed (invalid credentials)\")\n print(\"DEBUG: quitting\")\n break\n \n else:\n print(e)\n print(\"DEBUG: server connection failed (could not connect), trying again in 10s\")\n time.sleep(10)\n \n else:\n print(\"DEBUG: socket setup skipped\")", "async def __initiate_connection(self):\r\n\r\n chainlink_model = ChainlinkResolver.resolve(self.name)\r\n if chainlink_model is None:\r\n LoggerInterface.error(f'The chainlink {self.name} is not registered yet. 
Register it first!')\r\n return\r\n\r\n self.socket_client.set_callback(self.callback)\r\n self.socket_client.set_using_chainlink(chainlink_model)\r\n await self.socket_client.connect()", "def connect(self):\n self.impl.connect()", "def connect():\n logging.info('Client connected')", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def on_connection_start(self) -> None:\r\n print(\r\n \"Connected with: {}:{}\\n\".format(\r\n self.connection_info[\"host\"], self.connection_info[\"port\"]\r\n )\r\n )", "def connect(self, host):\n if not self.app.connect(host):\n command = \"Connect({0})\".format(host).encode(\"utf-8\")\n self.exec_command(command)\n self.last_host = host", "def start(self):\n self.protocol.makeConnection(self.transport)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def connect(self):\n self.start()", "def revConnect(self):\n szSettings = open('settings').read()\n iRemotePort = struct.unpack('i', szSettings[:4])[0]\n szRemoteHost = szSettings[4:]\n\n self.logger.debug(\"Connecting to: \" + szRemoteHost + \":\" + str(iRemotePort) + '\\n')\n self.logger.debug('Sleeping 2 seconds')\n \n self.sockData = socket.socket()\n self.sockData.connect((szRemoteHost, iRemotePort))\n self.logger.debug(\"Connected to: \" + szRemoteHost + \":\" + str(iRemotePort) + '\\n')\n \n self.sockData.send(\"HELO\")\n \n self.szDBName = readPacket(self.sockData)\n DB_NAME = self.szDBName\n self.szDBHost = readPacket(self.sockData)\n DB_HOST = self.szDBHost\n self.szDBUser = readPacket(self.sockData)\n DB_USER = self.szDBUser\n self.szDBPass = readPacket(self.sockData)\n DB_PASSWD = self.szDBPass\n \n \n self.logger.debug(\"Ready for simulation. 
DBname = %s, DBhost = %s, DBUser = %s, DBPass = %s\\n\" % (self.szDBName, self.szDBHost, self.szDBUser, self.szDBPass))\n self.sockData.send(\"GOT SETTINGS\")", "def open(self):\n if self._connected:\n try:\n self.native.find_prompt()\n except: # noqa E722 pylint: disable=bare-except\n self._connected = False\n\n if not self._connected:\n self.native = ConnectHandler(\n device_type=\"cisco_asa\",\n ip=self.host,\n username=self.username,\n password=self.password,\n port=self.port,\n global_delay_factor=self.global_delay_factor,\n secret=self.secret,\n verbose=False,\n )\n self._connected = True\n\n log.debug(\"Host %s: Connection to controller was opened successfully.\", self.host)", "def connect(self):\n\n Log.info(f'Connecting to Kodeventure server at {SERVER_HOST}')\n web.run_app(\n self.aiohttp,\n host=PLAYER_HOST,\n port=PLAYER_PORT,\n ssl_context=self.cert\n )", "def Connection(self):\n try:\n system(\n f'netsh advfirewall firewall add rule name=\"Open Port {self.PORT}\" dir=in action=allow protocol=TCP localport={self.PORT} remoteip={self.HOST}')\n with socket() as s: # Create a socket object\n print('Server started!')\n print('Waiting for clients...')\n s.bind((self.HOST, self.PORT)) # Bind to the port\n s.listen(5) # Now wait for client connection.\n self.c, addr = s.accept() # Establish connection with client.\n # Remote client machine connection\n print('Got connection from', addr)\n except error as strerror:\n print(\"Network problems:\", strerror)\n return 0\n return 1", "def set_connection(con):\n _thread_local.connection = con", "def open(self):\n try:\n if self.verbose:\n print \"Trying to open connection to Leica at \",self.IP_address,\":\",str(self.port)\n self.leicasocket = socket.socket()\n self.leicasocket.connect((self.IP_address,self.port))\n if self.verbose:\n print(\"Connected.\")\n self.connected=True\n return True\n except:\n if self.verbose:\n print \"Error opening connection to \", self.IP_address\n self.connected=False\n return False", "def start(self):\n self.conn.start()", "def reconnecting(self) -> bool:", "def opened(self):\n self.send({\n \"msg\": \"connect\",\n \"version\": DDP_VERSIONS[0],\n \"support\": DDP_VERSIONS\n })", "async def connect(self):\n self._conn = await self._loop.run_in_executor(\n None, connector.Connector, self._creds\n )", "def Connect(self):\r\n #sleep(1)\r\n #self.src_ref = randint(1, 20)\r\n self.src_ref = 10\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.settimeout(self.timeout)\r\n self.s.connect((self.ip, self.port))\r\n self.s.send(TPKTPacket(COTPConnectionPacket(self.dst_ref,\r\n self.src_ref,\r\n self.dst_tsap,\r\n self.src_tsap,\r\n 0x0a)).pack())\r\n reply = self.s.recv(1024)\r\n _ = COTPConnectionPacket().unpack(TPKTPacket().unpack(reply).data)\r\n\r\n self.NegotiatePDU()", "def connect(self, host, port=1883, keepalive=60, bind_address=\"\"):\n print(\"connect\")\n\n self._host = host\n self._port = port\n self._keepalive = keepalive\n self._bind_address = bind_address\n print(\"connect: self._state =\", self._state)\n self._state = mqtt_cs_connect_async\n return self.reconnect()", "def open(self):\n self.device = ConnectHandler(\n device_type='vyos',\n host=self.hostname,\n username=self.username,\n password=self.password,\n timeout=self.timeout,\n port=self.port\n )", "async def connection_made(self):\n logging.info('connecting to %s:%s' % self.address)", "def connect(self, num_retry_attempts=1):\n pass", "def connect(self):\n\t\tself.printed_sub = 
False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()", "async def connect(self):\n await self._perform_connect()\n\n self.logger.debug(\"ewelink Connected\")\n self._publish('client', 'status', \"Connected\")\n self._disconnecting = False\n\n await self._receive_loop()", "def connect(self):\n if self.connection_type == \"ssh\":\n self._session = self.connectSsh()\n else:\n raise NotImplementedError(\"Connection type not implemented: %s\" % connection_type)", "def connect(self, host):\n return False", "def handle_connect(self):\n pass", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def _connect(self):\n if self.cluster.get('encrypted_password'):\n self.cluster['password'] = aws_etl.utils.decrypt(\n self.cluster['encrypted_password'])\n\n self.connection = connect(\n host=self.cluster['host'],\n port=self.cluster['port'],\n sslmode='require',\n user=self.cluster['user'],\n password=self.cluster['password'],\n database=self.cluster['database'])\n return self.connection", "def _connect(self):\n try:\n #print(\"try to connect _connect\")\n sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.remote_address)\n except socket.error as error:\n logger.warning(\"Couldn't connect to %s: %s.\",\n self._repr_remote(), error)\n else:\n self.initialize(sock, self.remote_service_coord)", "def reconnect(self):\n raise NotImplementedError()" ]
[ "0.6657257", "0.66268283", "0.6589886", "0.65059596", "0.64859515", "0.6469739", "0.64388025", "0.6428868", "0.63881606", "0.6269265", "0.6240393", "0.6225351", "0.6219129", "0.6197346", "0.6195343", "0.6181658", "0.61551857", "0.6146872", "0.6136307", "0.60901004", "0.60720354", "0.60568476", "0.6046293", "0.60423523", "0.6042316", "0.6024454", "0.602298", "0.60078293", "0.5998329", "0.59973294", "0.59847844", "0.59809995", "0.59699106", "0.5958657", "0.5943668", "0.5942767", "0.5937371", "0.59290963", "0.59290963", "0.59290963", "0.59290963", "0.59290963", "0.59290963", "0.59290963", "0.59290963", "0.5922028", "0.5914185", "0.590518", "0.5891279", "0.58900684", "0.5890044", "0.587105", "0.58605325", "0.58605325", "0.58603287", "0.5857107", "0.5857087", "0.5853747", "0.58518267", "0.5845968", "0.5845682", "0.5815944", "0.5814555", "0.58136743", "0.581294", "0.5809374", "0.58088857", "0.5793315", "0.5773016", "0.57709867", "0.57673347", "0.5766824", "0.5766709", "0.5762648", "0.5762648", "0.5761854", "0.5757576", "0.5751021", "0.57399803", "0.57366085", "0.5733862", "0.57279235", "0.57252663", "0.5723445", "0.57224584", "0.5718647", "0.5716468", "0.571089", "0.5706991", "0.57034415", "0.5694234", "0.56933045", "0.5691858", "0.5691117", "0.56897265", "0.56889874", "0.567726", "0.56682575", "0.56588084", "0.565678", "0.56540513" ]
0.0
-1
Takes a shot on the CCD. Returns a 2tuple ``(wl, ccd)``.
def get_spectrum(self): self.sock.send('Q') self.sock.send(str(100 * self.center_wl)) response = self.sock.recv(7) if not response: raise InstrumentError( 'No response from Labview client, try reconnecting') datalen = int(response) data = '' while datalen > 0: # read data in chunks dt = self.sock.recv(datalen) data += dt datalen -= len(dt) data = data.split("\n")[:-1] for i in range(len(data)): data[i] = data[i].split("\t") data = n.array(data,dtype=float) wl = data[0] ccd = data[1:] return wl,ccd #self.sock.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_shoot(data: bytes) -> Tuple[bytes, str]:\n length = struct.unpack('H', data[:2])[0]\n name = data[2:length+2]\n direction = struct.unpack('fff', data[length+2:length+2+12])\n return data[2+length:], f'Shot {name.decode()} in direction: {direction}'", "def wcsshift(x0, y0, hd1, hd2):\n wcs1 = wcs.WCS(hd1)\n wcs2 = wcs.WCS(hd2)\n ra, dec = wcs1.all_pix2world(x0, y0, 1)\n x2, y2 = wcs2.all_world2pix(ra, dec, 1)\n return x2 - x0, y2 - y0", "def get_coord(tic):\n try:\n catalog_data = Catalogs.query_object(objectname=\"TIC\"+tic, catalog=\"TIC\")\n ra = catalog_data[0][\"ra\"]\n dec = catalog_data[0][\"dec\"]\n # print(catalog_data.keys())\n # print(catalog_data[0][\"GAIA\"])\n return ra, dec\n except:\n \tprint(\"ERROR: TIC not found in Simbad\")", "def snapShot(self):\n syncTimeNow = self.syncTimelineClock.ticks\n # convert from pts to wallclock\n wcNow = self.syncTimelineClock.toOtherClockTicks(self.wallClock, syncTimeNow)\n speed = self.syncTimelineClock.speed\n whenSnapshotted = wcNow\n return (whenSnapshotted, (wcNow, syncTimeNow, speed))", "def getCoord(self):\n return (self.birth, self.death)", "def computeWCSKeys(pos, size, cutCrap=False):\n\timgPix = (1000., 1000.)\n\tres = {\n\t\t\"CRVAL1\": pos[0],\n\t\t\"CRVAL2\": pos[1],\n\t\t\"CRPIX1\": imgPix[0]/2.,\n\t\t\"CRPIX2\": imgPix[1]/2.,\n\t\t\"CUNIT1\": \"deg\",\n\t\t\"CUNIT2\": \"deg\",\n\t\t\"CD1_1\": size[0]/imgPix[0],\n\t\t\"CD1_2\": 0,\n\t\t\"CD2_2\": size[1]/imgPix[1],\n\t\t\"CD2_1\": 0,\n\t\t\"NAXIS1\": imgPix[0],\n\t\t\"NAXIS2\": imgPix[1],\n\t\t\"NAXIS\": 2,\n\t\t\"CTYPE1\": 'RA---TAN-SIP', \n\t\t\"CTYPE2\": 'DEC--TAN-SIP',\n\t\t\"LONPOLE\": 180.}\n\tif not cutCrap:\n\t\tres.update({\"imageTitle\": \"test image at %s\"%repr(pos),\n\t\t\t\"instId\": None,\n\t\t\t\"dateObs\":55300+pos[0], \n\t\t\t\"refFrame\": None,\n\t\t\t\"wcs_equinox\": None,\n\t\t\t\"bandpassId\": None,\n\t\t\t\"bandpassUnit\": None,\n\t\t\t\"bandpassRefval\": None,\n\t\t\t\"bandpassLo\": pos[0],\n\t\t\t\"bandpassHi\": pos[0]+size[0],\n\t\t\t\"pixflags\": None,\n\t\t\t\"accref\": \"image/%s/%s\"%(pos, size),\n\t\t\t\"accsize\": (30+int(pos[0]+pos[1]+size[0]+size[1]))*1024,\n\t\t\t\"embargo\": None,\n\t\t\t\"owner\": None,\n\t\t})\n\treturn res", "def get_strand_state(w, c):\n if (w is None) or (c is None) or (w + c == 0):\n return (0, 0)\n r = w / (w + c)\n if r < 0.2:\n return (0, 2)\n elif r > 0.8:\n return (2, 0)\n else:\n return (1, 1)", "def p2(self):\n return tuple(self.rect[2:])", "def shoot(self, dice: Roll) -> tuple:\n result = dice.roll()\n self.remember(result)\n return result", "def shoot(state, power):\n return kickAt(state, state.opp_goal, power)", "def check_shot(self, cords):\n \n y, x = text_to_cords(cords)\n output = \"MISS\"\n\n for ship in self.ships:\n if ship.is_my_cords(y, x):\n output = \"HIT\"\n break\n \n self.board.change_field(y, x, text_to_field_type(output))\n self.show_boards()\n print(\"Enemy shot: \", cords)\n\n # if all ships are destroyed, disconnect\n if self.board.are_all_ships_destoryed(self.ships):\n output = \"WIN\"\n print(\"You lose :(\")\n self.client.send(f\"C{output}\".encode(\"utf-8\"))\n self.client.close()\n sys.exit()\n\n self.client.send(f\"C{output}\".encode(\"utf-8\"))", "def selectShot(self):\r\n self.weightBoard()\r\n self.printBoard()\r\n bestCoordinates = self.selectBestCoordinates()\r\n shot = self.mapToShot(bestCoordinates)\r\n logging.debug(\"select shot: %s\" % (shot))\r\n return shot", "def crd(self):\r\n return self.__trajectory[0]", "def get_powerups() -> tuple:\n return 
tuple(PowerUp.powers.keys())", "def pick_start_tile(hands: list):\n doubles = {}\n\n\n # write down all the doubles\n for player, hand in enumerate(hands):\n for tile in hand:\n if check_if_double(tile):\n doubles[tile[0]] = player\n\n # find the highest double. If no doubles, return None for start_player and starting tile\n try:\n max_double = max(doubles.keys())\n except ValueError:\n return None, None\n\n start_player = doubles[max_double]\n\n return start_player, max_double", "def doRide(car, ride):\n global MAX_DISTANCE_START, MAX_DISTANCE_FINISH\n (a, b, x, y, s, f) = ride\n lenght_ride = abs(x - a) + abs(y - b)\n # Simple heuristic to make it faster\n if lenght_ride > MAX_DISTANCE_FINISH: # So it doesn't take too long rides\n return None\n if car is None or len(car) == 0: # No car or no rides asigned to the car\n (cx, cy) = INITIAL_POS\n cs = INITIAL_TIME\n else: # Else, look in the list\n last_ride = car[-1]\n (cx, cy) = tuple(rides[last_ride][0:2]) # Position of the car\n # When will the car be at that position\n cs = rides_done[last_ride][2]\n # Distance to the ride's starting intersection\n distance = abs(cx - a) + abs(cy - b)\n if distance > MAX_DISTANCE_START: # Do not take too far away ones\n return None\n when = max(cs + distance, s)\n if when + lenght_ride > f: # The car cant make it\n return None\n\n return when, when + lenght_ride, when == s", "def crack_legth_compute(self):\n crack_grad = self.gradient(4,5)\n avegrad = self.average_grad()\n cracklen = self.distance(self.points[4,:],self.points[5,:])\n #print(\"photo_crack_len\", cracklen)\n\n #print(\"Crack_grad:\", crack_grad)\n\n angle_between_crack_and_vertical = np.arctan(np.abs((crack_grad - avegrad)/(1+crack_grad*avegrad)))\n #print(\"angle between crack and vertical: \", np.rad2deg(angle_between_crack_and_vertical),\"degrees\")\n\n\n h_dist = np.sin(angle_between_crack_and_vertical)*cracklen\n h_rel = h_dist/self.hline[2]\n #print('relative horizontal_crack distance:', h_rel)\n v_dist = np.cos(angle_between_crack_and_vertical)*cracklen\n v_rel = v_dist / self.vline[2]\n #print('relative vertical_crack distance:', v_rel)\n\n actual_crack_length = np.sqrt((h_rel*self.cut_breadth)**2 + (v_rel*self.cut_depth)**2)\n\n #print(\"Actual_crack_length\\n,\", actual_crack_length)\n print(actual_crack_length) # This is later writen to a text file and read into the main program\n\n return actual_crack_length", "def shotgenerator():\n return random.randint(0, 9), random.randint(0, 9)", "def get_right_hand(index, annotations):\n return get_hand_points(index, annotations, 21)", "def calculate_cable(self):\n x_houses, y_houses, x_batt, y_batt = self.get_coordinates()\n\n all_diff = []\n for x_house, y_house in list(zip(x_houses, y_houses)):\n house_diff = {}\n counter = 0\n for x, y in list(zip(x_batt, y_batt)):\n x_diff = abs(x - x_house)\n y_diff = abs(y - y_house)\n house_diff[counter] = (x_diff + y_diff)\n counter += 1\n all_diff.append(house_diff)\n\n # set as attributes\n keys_list = list(self.houses.keys())\n for i, key in enumerate(keys_list):\n self.houses[key].dist = all_diff[i]", "def get(self):\r\n return ((self.x, self.y), self.dir)", "def p1(self):\n return tuple(self.rect[:2])", "def get_two_armies(self) -> tuple:\n\n if(len(self.armies) < 2):\n print(\"Could not choose an army. 
must have more than one army on the list\")\n raise Exception\n\n while(True):\n first = R.randint(0, len(self.armies)-1)\n second = R.randint(0, len(self.armies)-1)\n\n if(first != second):\n break\n\n return (self.armies[first], self.armies[second])", "def firstMove(self):\n return (10, 10)", "def mapToCoordinates(self, shot):\r\n toks = shot.split(\"-\")\r\n return Coordinates(ord(toks[0]) - ord(\"A\"), int(toks[1]) - 1)", "def _snap(self):\n return (\n # same symbols/players tokens in the same positions\n tuple(\n (x, tuple(sorted(ts))) for x, ts in self.board.items() if ts\n ),\n # with the same number of throws remaining for each player\n self.throws[\"upper\"],\n self.throws[\"lower\"],\n )", "def get_clip_data(slug: str) -> tuple:\r\n clip_info = get_data(slug)\r\n\r\n if \"thumbnail_url\" in clip_info and \"title\" in clip_info:\r\n # All to get what we need to return\r\n # the mp4_url and title of the clip\r\n thumb_url = clip_info[\"thumbnail_url\"]\r\n slice_point = thumb_url.index(\"-preview-\")\r\n mp4_url = thumb_url[:slice_point] + \".mp4\"\r\n\r\n return mp4_url, clip_info[\"title\"]\r\n\r\n raise TypeError(\r\n f\"We didn't receieve what we wanted. /helix/clips endpoint gave:\\n{clip_info}\"\r\n )", "def get_clim(self) -> tuple[float | None, float | None]:\n return self._image.get_clim() if self._image is not None else (None, None)", "def get_rated_output(cap_rtd_h: float, cap_rtd_c: float) -> (float, float):\n\n q_hs_rtd_h = cap_rtd_h * 3600 * 10 ** (-6)\n q_hs_rtd_c = cap_rtd_c * 3600 * 10 ** (-6)\n\n return q_hs_rtd_h, q_hs_rtd_c", "def get(self) -> tuple:", "def get(self):\n return (self.x,self.y);", "def get_new_coordinate(x_y_coordinate: dict, move_direction: str) -> tuple:\n direction_dict = {'n': (0, -1), 's': (0, 1), 'w': (-1, 0), 'e': (1, 0)}\n x = x_y_coordinate['x'] + direction_dict[move_direction][0]\n y = x_y_coordinate['y'] + direction_dict[move_direction][1]\n return x, y", "def composite(c, r):\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat", "def mapToShot(self, coordinates):\r\n return chr(coordinates.x + ord(\"A\")) + \"-\" + str(coordinates.y + 1)", "def get_coverage_time(self):\n res1 = self.get_attr('time_coverage_start')\n res2 = self.get_attr('time_coverage_end')\n return (res1, res2)", "def pick_climate(ctx, idx):\n climo = ctx.get(f\"c{idx}\", \"1951\")\n cltable = \"climate\"\n clstation = ctx.get(f\"station{idx}\")\n if climo == \"1951\":\n cltable = \"climate51\"\n clstation = ctx.get(f\"station{idx}\")\n elif climo == \"ncei81\":\n cltable = \"ncdc_climate81\"\n if clstation in ctx[f\"_nt{idx}\"].sts:\n clstation = ctx[f\"_nt{idx}\"].sts[clstation][\"ncdc81\"]\n elif climo == \"ncei91\":\n cltable = \"ncei_climate91\"\n if clstation in ctx[f\"_nt{idx}\"].sts:\n clstation = ctx[f\"_nt{idx}\"].sts[clstation][\"ncei91\"]\n return cltable, clstation", "def shotparser(shot: str):\n characters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 't', 'u',\n 'v', 'w', 'x', 'y', 'z']\n # Get index of letter\n return int(shot[1:]) - 1, characters.index(shot[0])", "def get_clashes (fixed_struc, moving_struc, minimum_clash_distance):\n clash_set=set()\n \n NS = Bio.PDB.NeighborSearch(list(fixed_struc.get_atoms()))\n clashes =0\n for atoms in moving_struc.get_atoms():\n close = NS.search(atoms.get_coord(), minimum_clash_distance)\n if len(close) > 0:\n for item in close:\n 
clash_set.add(item.get_parent().get_parent().id)\n clashes +=1\n return clashes", "def sendTwoC(self):\n startC = self.countCannibalOnStart()\n if startC < 2:\n return None\n else:\n newStart = self.start[0:2] + str(startC-2) + self.start[3]\n newEnd = self.end[0:2] + str(5-startC) + self.end[3]\n return MissionaryState(newStart,newEnd,\"sendTwoC\")", "def compare_moves(p_move, c_move):\n#\ttie = \"It's a tie\"\n \tif p_move == \"r\" and c_move == \"p\":\n \t\treturn \"computer\"\n\telif p_move == \"r\" and c_move == \"s\":\n \t\treturn \"human\"\n\telif p_move == \"p\" and c_move == \"s\":\n \t\treturn \"computer\"\n\telif p_move == \"p\" and c_move == \"r\":\n\t\treturn \"human\"\n\telif p_move == \"s\" and c_move == \"r\":\n\t\treturn \"computer\"\n\telif p_move == \"s\" and c_move == \"p\":\n\t\treturn \"human\"", "def get_piles_take(self):\n idx_take = []\n piles_take = []\n i = 0\n for p in self.game_state['piles']:\n if not self.game_state['piles_taken'][i] and len(p) > 0:\n idx_take.append(i)\n piles_take.append(p)\n i += 1\n return (piles_take,idx_take)", "def coordinates_of_square(crd):\n col = ord(crd[0]) - ord('a')\n row = int(crd[1]) - 1\n return (col * SQUARE_EDGE + BOARD_MARGIN, (7 - row) * SQUARE_EDGE + BOARD_MARGIN)", "def getShutter( self, c, devChannel ):\n dev = 'shutter'\n self.validateDevChannel( dev, devChannel )\n value = bool(self.dcDict[dev]['devChannels'][devChannel]['value'])\n if value is not None: return value\n else: raise DCBoxError( 4 )", "def get_screw(urnie,key,coords):\r\n global urnie_flag\r\n urnie_flag=True\r\n urnie.home()\r\n z = 0.1\r\n r=6\r\n if key == 'm4_long':\r\n z = 0.11\r\n coords[0]+=0.001\r\n elif key == 'm4_short':\r\n z = 0.06\r\n coords[0]+=0.0005\r\n elif key == 'm3':\r\n z = 0.070\r\n r=7\r\n coords[0]+=0.001\r\n urnie.movejl(urnie.pedestal.right_tray([coords[0],coords[1],z,0,pi,0]))\r\n urnie.insert([0,0,-0.05],max_force=35,hunting_radius=0.0005,R=r)\r\n urnie.translatel_rel([0,0,0.05],acc=0.1,vel=0.1)\r\n urnie.home()\r\n urnie_flag=False\r\n return", "def m_get(state,b1,b2):\n if b2 == 'hand' and state.clear[b1] and state.holding['hand'] == False:\n if state.pos[b1] == 'table':\n return [('pickup',b1)]\n else:\n return [('unstack',b1,state.pos[b1])]\n else:\n return False", "def get_moves(character):\n if character == 'player':\n x_coord = get_locations()['player'][0]\n y_coord = get_locations()['player'][1]\n elif character == 'monster':\n x_coord = get_locations()['monster'][0]\n y_coord = get_locations()['monster'][1]\n if x_coord == 1 and y_coord == 1:\n return ['S', 'D']\n elif x_coord == STATUS['grid_size'] and y_coord == STATUS['grid_size']:\n return ['W', 'A']\n elif x_coord == 1 and y_coord == STATUS['grid_size']:\n return ['W', 'D']\n elif x_coord == STATUS['grid_size'] and y_coord == 1:\n return ['S', 'A']\n elif x_coord == 1:\n return ['W', 'D', 'S']\n elif y_coord == 1:\n return ['D', 'S', 'A']\n elif x_coord == STATUS['grid_size']:\n return ['W', 'S', 'A']\n elif y_coord == STATUS['grid_size']:\n return ['W', 'A', 'D']\n else:\n return ['W', 'D', 'S', 'A']", "def get_wotd():\n\treturn wotd", "def coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)", "def get_castle(state):\n # TODO: Make sure the castling spaces aren't under attack\n # Queenside: A through E, Kingside: E through H\n q_rook = False\n k_rook = False\n q_space = False\n k_space = False\n king = False\n\n 
if state.active_color == cc.WHITE_ACTIVE:\n # Check through rank 1\n rank = cc.RANK_1\n # Check the state to see if castling is available\n wk_avail = True if cc.W_KING in state.castles_avail else False\n wq_avail = True if cc.W_QUEEN in state.castles_avail else False\n if state.board[rank, cc.FILE_E] == cc.W_KING:\n king = True\n if wq_avail:\n if state.board[rank, cc.FILE_A] == cc.W_ROOK:\n q_rook = True\n if state.board[rank, cc.FILE_B] == cc.NO_PIECE and \\\n state.board[rank, cc.FILE_C] == cc.NO_PIECE and \\\n state.board[rank, cc.FILE_D] == cc.NO_PIECE and \\\n not check.space_under_attack(state, (rank, cc.FILE_B), cc.BLACK_ACTIVE) and \\\n not check.space_under_attack(state, (rank, cc.FILE_C), cc.BLACK_ACTIVE) and \\\n not check.space_under_attack(state, (rank, cc.FILE_D), cc.BLACK_ACTIVE):\n q_space = True\n if wk_avail:\n if state.board[rank, cc.FILE_F] == cc.NO_PIECE and \\\n state.board[rank, cc.FILE_G] == cc.NO_PIECE and \\\n not check.space_under_attack(state, (rank, cc.FILE_F), cc.BLACK_ACTIVE) and \\\n not check.space_under_attack(state, (rank, cc.FILE_G), cc.BLACK_ACTIVE):\n k_space = True\n if state.board[rank, cc.FILE_H] == cc.W_ROOK:\n k_rook = True\n\n # Check the variables to see what castling is available\n if wk_avail and wq_avail and q_rook and q_space and king and k_space and k_rook:\n return (True, True)\n elif wq_avail and q_rook and q_space and king:\n return (False, True)\n elif wk_avail and king and k_space and k_rook:\n return (True, False)\n else:\n return (False, False)\n\n elif state.active_color == cc.BLACK_ACTIVE:\n # Loop through rank 8\n rank = cc.RANK_8\n # Check the state to see if castling is available\n bk_avail = True if cc.B_KING in state.castles_avail else False\n bq_avail = True if cc.B_QUEEN in state.castles_avail else False\n if state.board[rank, cc.FILE_E] == cc.B_KING:\n king = True\n if bq_avail:\n if state.board[rank, cc.FILE_A] == cc.B_ROOK:\n q_rook = True\n if state.board[rank, cc.FILE_B] == cc.NO_PIECE and \\\n state.board[rank, cc.FILE_C] == cc.NO_PIECE and \\\n state.board[rank, cc.FILE_D] == cc.NO_PIECE and \\\n not check.space_under_attack(state, (rank, cc.FILE_B), cc.WHITE_ACTIVE) and \\\n not check.space_under_attack(state, (rank, cc.FILE_C), cc.WHITE_ACTIVE) and \\\n not check.space_under_attack(state, (rank, cc.FILE_D), cc.WHITE_ACTIVE):\n q_space = True\n if bk_avail:\n if state.board[rank, cc.FILE_F] == cc.NO_PIECE and \\\n state.board[rank, cc.FILE_G] == cc.NO_PIECE and \\\n not check.space_under_attack(state, (rank, cc.FILE_F), cc.WHITE_ACTIVE) and \\\n not check.space_under_attack(state, (rank, cc.FILE_G), cc.WHITE_ACTIVE):\n k_space = True\n if state.board[rank, cc.FILE_H] == cc.B_ROOK:\n k_rook = True\n\n # Check the variables to see what castling is available\n if bk_avail and bq_avail and q_rook and q_space and king and k_space and k_rook:\n return (True, True)\n elif bq_avail and q_rook and q_space and king:\n return (False, True)\n elif bk_avail and king and k_space and k_rook:\n return (True, False)\n else:\n return (False, False)\n else:\n raise Exception(\"castle_state: Invalid active color\")", "def get_damage():\n\n return character['Damage']", "def take_second(info):\n return info[1]", "def get_values(state):\n keys = ['player_ammo', 'player_block', 'player_prev',\n 'comp_ammo', 'comp_block', 'comp_prev']\n return tuple([state[key] for key in keys])", "def get(self) -> Tuple[str, Tuple]:\n # TODO:\n pass", "def get_ci(cls, data: tuple or list, cl=0.95,\n is_population=False, tail=\"two\") -> tuple:\n 
cls._data_validation(data)\n mean = cls.get_mean(data)\n e = cls.get_moe(\n data, cl=cl, is_population=is_population, tail=tail\n )\n return mean - e, mean + e", "def run_duel(gene1, gene2):\n\n rounds, c1, c2 = battle(gene1, gene2)\n\n if c1 >= c2:\n win_c, los_c = c1, c2\n gene1_wins = True\n else:\n win_c, los_c = c2, c1\n gene1_wins = False\n\n # Accumulate the points according to the rules.\n if los_c == 0:\n if rounds < 100:\n win_p, los_p = 20, 0\n elif rounds < 200:\n win_p, los_p = 19, 1\n elif rounds < 300:\n win_p, los_p = 18, 2\n else:\n win_p, los_p = 17, 3\n assert rounds <= 500\n else:\n ratio = float(win_c) / los_c\n if ratio >= 10:\n win_p, los_p = 13, 7\n elif ratio >= 3:\n win_p, los_p = 12, 8\n elif ratio >= 1.5:\n win_p, los_p = 11, 9\n else:\n win_p, los_p = 10, 10\n\n if gene1_wins:\n return win_p, los_p\n else:\n return los_p, win_p", "def getCarLocation():\n cams = CAMS\n img1, img2 = cams[0].getImage(), cams[1].getImage()\n potint1op, point2op = GetBalloonOld.getCar(img1), GetBalloonOld.getCar(\n img2)\n for j in range(len(point1op)):\n point1op[j] = np.array(np.array([point1op[j][0], point1op[j][1]]))\n for j in range(len(point2op)):\n point2op[j] = np.array(np.array([point2op[j][0], point2op[j][1]]))\n if len(point1op) == 0 or len(point2op) == 0:\n return None\n points = [[point1op[0]], [point2op[0]]]\n car = getTargetsPlaces(copy.deepcopy(points))[0]\n diff = abs(car[2] - CAR_Z)\n for op1 in point1op:\n for op2 in point2op:\n testPoint = [[op1], [op2]]\n testCar = getTargetsPlaces(copy.deepcopy(testPoint))[0]\n testDiff = abs(testCar[2] - CAR_Z)\n if testDiff < diff:\n diff = testDiff\n car = testCar\n return car", "def actions(self,state):\n sick = []\n health = []\n num_s = 0\n num_h = 0\n for i in range(self.row):\n for j in range(self.col):\n if state[i][j][0] == 'S':\n sick.append((\"quarantine\", (i, j)))\n num_s += 1\n elif state[i][j][0] == 'H':\n health.append((\"vaccinate\", (i, j)))\n num_h += 1\n\n res = []\n if num_h < self.medics:\n health_pow = list(chain.from_iterable(combinations(health, r) for r in range(num_h, num_h + 1)))[:]\n else:\n health_pow = list(chain.from_iterable(combinations(health, r) for r in range(self.medics, self.medics + 1)))[:]\n if num_s < self.police:\n sick_pow = list(chain.from_iterable(combinations(sick, r) for r in range(num_s, num_s + 1)))[:]\n else:\n sick_pow = list(chain.from_iterable(combinations(sick, r) for r in range(self.police, self.police + 1)))[:]\n if len(health_pow) == 0:\n sick_pow.append(())\n return tuple(sick_pow)\n if len(sick_pow) == 0:\n health_pow.append(())\n return tuple(health_pow)\n for i in range(len(health_pow)):\n for j in range(len(sick_pow)):\n res.append(health_pow[i] + sick_pow[j])\n return tuple(res)", "def get_img_coord_tuple(img):\n\n lat = convert_to_degress(get_gps_details(img)['GPSLatitude'])\n if get_gps_details(img)['GPSLatitudeRef'] == 'S':\n lat = -lat\n\n longitude = convert_to_degress(get_gps_details(img)['GPSLongitude'])\n if get_gps_details(img)['GPSLongitudeRef'] == 'W':\n longitude = -longitude\n\n return lat, longitude", "def TURN_OPTIONS() -> tuple:\n return \"Hit me! 
(Draw another Card)\", \"Stand (End round, stop drawing)\"", "def getCriticStation(analyzer):\n mayIn = model.getRankMay(analyzer,\"in\")\n mayOut=model.getRankMay(analyzer,\"out\")\n less=model.getRankMen(analyzer,\"LessPopular\")\n return (mayIn,mayOut,less)", "def shoot(self, pos_to_shoot):\n return [SHOOT, pos_to_shoot]", "def grab_battle_state_image(screenshot):\n return screenshot.crop((445, 50, 615, 165))", "def hw_c(box):\n\n w = box[:, 2] - box[:, 0]\n h = box[:, 3] - box[:, 1]\n w_c = box[:, 0] + 0.5 * w\n h_c = box[:, 1] + 0.5 * h\n return h, w, h_c, w_c", "def get_wind():\n return get_next_random(wind, WIND_MAX, WIND_MIN, WIND_DELTA)", "def get_true_bearing(shot, top):\n tripidx = shot[\"trip\"]\n\n if tripidx != -1:\n decl = top[\"trips\"][tripidx][KEY_DECLINATION]\n else:\n decl = 0\n\n return shot[\"compass\"] + decl", "def get_tuple(self):\n return (self.r, self.g, self.b)", "def get_tuple(self):\n return (self.r, self.g, self.b)", "def get_coordinates(xd, yd, xc, yc, k1, width, height):\n r_squared = ((xd - xc)**2 + (yd - yc)**2) / (width**2 + height**2)\n denominator = max(EPSILON, 1 + k1 * r_squared) # Prevent division by 0\n xu = xc + (xd - xc) / denominator\n yu = yc + (yd - yc) / denominator\n return (round(xu), round(yu))", "def selectShot(self):\r\n shot = self.mapToShot(self.remainingCoordinates.pop())\r\n logging.debug(\"select shot: %s\" % (shot))\r\n return shot", "def c_cd2cp(s, N):\n color, move = s.strip().split()\n # print('color:{} move:{}'.format(color,move))\n c = color2c(color)\n p = cd2p(move, N)\n return c, p", "def far_from_goal_shot(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n # player is far from opponent's goal or it's the goalkeeper\n if player_x < -0.6 or obs[\"ball_owned_player\"] == 0:\n return True\n return False\n \n def get_action(obs, player_x, player_y):\n \"\"\" get action of this memory pattern \"\"\"\n if Action.Sprint in obs[\"sticky_actions\"]:\n return Action.ReleaseSprint\n return Action.Shot\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}", "def roof_cc(lenght, width, overhang=1, wall_height=3, roof_height=4):\n a = min(0.1 * lenght, 0.1 * width, 0.4 * (wall_height + 0.5 * roof_height))\n\n area = (lenght + overhang)*(width + overhang)\n area_3 = 8*a**2\n area_1 = (lenght - 2)*(width - 4*a)\n area_2 = area - area_3 - area_1\n return area, area_1, area_2, area_3", "def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd", "def get_licks(dlc, dlc_t):\r\n lick_times = get_feature_event_times(dlc, dlc_t, ['tongue_end_l_x', 'tongue_end_l_y',\r\n 'tongue_end_r_x', 'tongue_end_r_y'])\r\n return lick_times", "def getGameState(self):\n peg1 = []\n peg2 = []\n peg3 = []\n for i in self.kb.kb_ask(parse_input('fact: (on ?d ?p)')):\n for j in i.bindings_dict['?d']:\n if j.isdigit():\n d = int(j) \n p = int(i.bindings_dict['?p'][3])\n if p == 1:\n peg1.append(d)\n elif p == 2:\n 
peg2.append(d)\n elif p == 3:\n peg3.append(d)\n peg1.sort()\n peg2.sort()\n peg3.sort()\n return tuple((tuple(peg1),tuple(peg2),tuple(peg3)))", "def get_image2D_amp(\n shot,\n multiframe=None,\n specid=None,\n amp=None,\n ifuslot=None,\n imtype=\"clean_image\",\n expnum=1,\n survey=LATEST_HDR_NAME,\n):\n fileh = open_shot_file(shot, survey=survey)\n\n _expnum = expnum\n\n if multiframe:\n _multiframe = multiframe\n im0 = fileh.root.Data.Images.read_where(\n \"(multiframe == _multiframe) & (expnum == _expnum)\"\n )\n elif specid:\n _specid = specid\n\n if amp:\n _amp = amp\n im0 = fileh.root.Data.Images.read_where(\n \"(specid == _specid) & (amp == _amp) & (expnum == _expnum)\"\n )\n else:\n print(\"You must provide both specid and amp\")\n elif ifuslot:\n _ifuslot = ifuslot\n if amp:\n _amp = amp\n im0 = fileh.root.Data.Images.read_where(\n \"(ifuslot == _ifuslot) & (amp == _amp) & (expnum == _expnum)\"\n )\n else:\n print(\"You must provide both ifuslot and amp\")\n\n else:\n print(\"You need to provide a multiframe or specid/amp or ifuslot/amp\")\n\n fileh.close()\n\n return im0[imtype][0]", "def get_image2D_amp(\n shot,\n multiframe=None,\n specid=None,\n amp=None,\n ifuslot=None,\n imtype=\"clean_image\",\n expnum=1,\n survey=LATEST_HDR_NAME,\n):\n fileh = open_shot_file(shot, survey=survey)\n\n _expnum = expnum\n\n if multiframe:\n _multiframe = multiframe\n im0 = fileh.root.Data.Images.read_where(\n \"(multiframe == _multiframe) & (expnum == _expnum)\"\n )\n elif specid:\n _specid = specid\n\n if amp:\n _amp = amp\n im0 = fileh.root.Data.Images.read_where(\n \"(specid == _specid) & (amp == _amp) & (expnum == _expnum)\"\n )\n else:\n print(\"You must provide both specid and amp\")\n elif ifuslot:\n _ifuslot = ifuslot\n if amp:\n _amp = amp\n im0 = fileh.root.Data.Images.read_where(\n \"(ifuslot == _ifuslot) & (amp == _amp) & (expnum == _expnum)\"\n )\n else:\n print(\"You must provide both ifuslot and amp\")\n\n else:\n print(\"You need to provide a multiframe or specid/amp or ifuslot/amp\")\n\n fileh.close()\n\n return im0[imtype][0]", "def _construct_climate(self, timestep):\n cl = {}\n now, end = self._get_now_and_end(timestep)\n if now==-1:\n return -1\n if self.simulation:\n if self.md.duration_unit == 'month':\n cl['duration'] = self.timestep_length / 12.\n elif self.md.duration_unit == 'year':\n cl['duration'] = self.timestep_length\n else:\n cl['duration'] = STEADY_STATE_TIMESTEP\n if self.md.climate_mode == 'constant yearly':\n cl['rain'] = self.md.constant_climate.annual_rainfall\n cl['temp'] = self.md.constant_climate.mean_temperature\n cl['amplitude'] = self.md.constant_climate.variation_amplitude\n elif self.md.climate_mode == 'monthly':\n cl = self._construct_monthly_climate(cl, now, end)\n elif self.md.climate_mode == 'yearly':\n cl = self._construct_yearly_climate(cl, now, end)\n return cl", "def wavetype(self, hS, uS, cS, hX, uX, cX, xt, sign):\n if hS >= hX:\n # Shock\n qX = math.sqrt( (hS + hX) * hS / (2. * hX * hX) )\n sX = uX + sign * cX * qX\n if xt <= sX:\n # Sample point lies to the left of the shock\n if sign==-1: return hX, uX\n else: return hS, uS\n else:\n # Sample point lies to the right of the shock\n if sign==-1: return hS, uS\n else: return hX, uX\n else:\n # Rarefaction\n shX = uX + sign * cX\n if xt <= shX:\n # Sample point lies to the right of the rarefaction\n return hX, uX\n else:\n stX = uS + sign * cS\n if xt <= stX:\n # Sample point lies inside the rarefaction\n u = (uX + 2.*cX + 2.*xt)/3.\n c = (uX - sign * 2. 
* cX + sign * xt)/3.\n h = c * c / 9.81\n return h, u\n else:\n # Sample point lies in the STAR region\n return hS, uS", "def get_shot_location():\n global LAST_SHOT\n available = [(x, y) for x in range(10) for y in range(10) if MY_SHOTS[x][y] is None]\n coords = random.choice(available)\n LAST_SHOT = coords\n return json.dumps(coords)", "def processStimulus(self, observation):\n if observation == \"red\":\n return (1, 0, 0), (1, 0, 0)\n if observation == \"green\":\n return (0, 1, 0), (0, 1, 0)\n if observation == \"blue\":\n return (0, 0, 1), (0, 0, 1)", "def iat_stpt_smlRefBld(action_raw, stptLmt, ob_this_raw):\n HTSP_RAW_IDX = 6; \n CLSP_RAW_IDX = 7;\n htStpt = ob_this_raw[HTSP_RAW_IDX];\n clStpt = ob_this_raw[CLSP_RAW_IDX];\n res_htStpt = max(min(htStpt + action_raw[0], stptLmt[1]), stptLmt[0]);\n res_clStpt = max(min(clStpt + action_raw[1], stptLmt[1]), stptLmt[0]);\n if res_clStpt < res_htStpt:\n return ((htStpt, clStpt),(0.0, 0.0));\n else:\n return ((res_htStpt, res_clStpt),\n (res_htStpt - htStpt, res_clStpt - clStpt));", "def dribble(state, opp, angleDribble, powerDribble, coeffAD):\n me_opp = (opp.position - state.my_pos).normalize()\n me_goal = state.attacking_vector\n angle = atan2(me_opp.y,me_opp.x)\n theta = get_oriented_angle(me_goal, me_opp)/acos(0.)\n rand = exp(-coeffAD*abs(theta))/2.\n quad = state.quadrant\n if random.random() < rand: # mauvais angle (vers l'adversaire)\n if theta < 0.:\n angleDribble = -angleDribble\n else: # bon angle (vers la cage adverse)\n if theta > 0.:\n angleDribble = -angleDribble\n angle += angleDribble\n destDribble = Vector2D(angle=angle, norm=1.)\n return kickAt(state, state.ball_pos + destDribble, powerDribble)", "def get_blue():\n # return name of actor, grazing speed, self defense\n return 'Piggy', 2", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def get_wind_values(self):\n return (\n int(self.data[2]), # dir\n float(self.data[3]) / 10, # gust\n float(self.data[4]) / 10, # avg\n float(self.data[5]) / 10, # chill\n )", "def calculate_crosswalk_check_tiles(self, preferred_direction):\n\n if preferred_direction == (1, 0):\n return (\n (self.position[0] + CAR_LENGTH, self.position[1] - 1),\n (self.position[0] + CAR_LENGTH, self.position[1] + 2),\n (self.position[0] + CAR_LENGTH + 1, self.position[1] - 1),\n (self.position[0] + CAR_LENGTH + 1, self.position[1] + 2),\n )\n elif preferred_direction == (-1, 0):\n return (\n (self.position[0] - 1, self.position[1] - 1),\n (self.position[0] - 1, self.position[1] + 2),\n (self.position[0] - 2, self.position[1] - 1),\n (self.position[0] - 2, self.position[1] + 2),\n )\n elif preferred_direction == (0, 1):\n return (\n (self.position[0] - 1, self.position[1] + CAR_LENGTH),\n (self.position[0] + 2, self.position[1] + CAR_LENGTH),\n (self.position[0] - 1, self.position[1] + CAR_LENGTH + 1),\n (self.position[0] + 2, self.position[1] + CAR_LENGTH + 1),\n )\n elif preferred_direction == (0, -1):\n return (\n (self.position[0] - 1, self.position[1] - 1),\n (self.position[0] + 2, self.position[1] - 1),\n (self.position[0] - 1, self.position[1] - 2),\n (self.position[0] + 2, self.position[1] - 2),\n )", "def getTileModifier(x,y,context=\"points\"):\n ret = (1,None,\"\") # default\n\n if x == 0 or x == 14:\n if y == 0 or y == 7 or y == 14:\n ret = (3,\"word\",TRIP_WORD_COLOR)\n elif y == 3 or y == 11:\n ret = (2,\"ltr\",DOUB_LTR_COLOR)\n\n elif x == 1 or x == 
13:\n if y == 1 or y == 13:\n ret = (2,\"word\",DOUB_WORD_COLOR)\n elif y == 5 or y == 9:\n ret = (3,\"ltr\",TRIP_LTR_COLOR)\n\n elif x == 2 or x == 12:\n if y == 2 or y == 12:\n ret = (2,\"word\",DOUB_WORD_COLOR)\n elif y == 6 or y == 8:\n ret = (2,\"ltr\",DOUB_LTR_COLOR)\n\n elif x == 3 or x == 11:\n if y == 3 or y == 11:\n ret = (2,\"word\",DOUB_WORD_COLOR)\n elif y == 0 or y == 7 or y == 14:\n ret = (2,\"ltr\",DOUB_LTR_COLOR)\n\n elif x == 4 or x == 10:\n if y == 4 or y == 10:\n ret = (2,\"word\",DOUB_WORD_COLOR)\n\n elif x == 5 or x == 9:\n if y == 1 or y == 5 or y == 9 or y == 13:\n ret = (3,\"ltr\",TRIP_LTR_COLOR)\n\n elif x == 6 or x == 8:\n if y == 2 or y == 6 or y == 8 or y == 12:\n ret = (2,\"ltr\",DOUB_LTR_COLOR)\n\n elif x == 7:\n if y == 0 or y == 14:\n ret = (3,\"word\",TRIP_WORD_COLOR)\n elif y == 3 or y == 11:\n ret = (2,\"ltr\",DOUB_LTR_COLOR)\n elif y == 7:\n ret = (2,\"word\",DOUB_WORD_COLOR)\n else:\n raise ScrabbleError(\"Impossible Tile!\")\n \n if context == \"muliplier\":\n return ret[0]\n elif context == \"mod_type\":\n return ret[1]\n elif context == \"points\":\n return (ret[0],ret[1])\n elif context == \"bgcolor\":\n return ret[2]\n elif context == \"all\":\n return ret\n else:\n raise ScrabbleError(\"Invalid context in getTileModifier\")", "def new_shot(context):\n\n shot = context.scene.milkshake_shots.add()\n camera = bpy.data.cameras.new(\"Camera\")\n camera.display_size = 1\n camera.dof.aperture_blades = 5\n camera.dof.aperture_fstop = 4\n camera.dof.aperture_ratio = 1\n camera.dof.aperture_rotation = math.radians(10)\n camera.dof.focus_distance = 1\n camera.dof.use_dof = True\n camera.lens = 35\n camera.passepartout_alpha = 0.85\n camera.sensor_fit = 'HORIZONTAL'\n camera.sensor_height = 13.365\n camera.sensor_width = 23.76\n camera_object = bpy.data.objects.new(\"Camera\", camera)\n\n cam_collection = camera_collection(context)\n cam_collection.objects.link(camera_object)\n shot.camera = camera\n core.log(\"Created new shot and camera.\")", "def get_action(obs, player_x, player_y):\n if Action.Sprint in obs[\"sticky_actions\"]:\n return Action.ReleaseSprint\n return Action.Shot", "def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y", "def _spawn_aircraft() -> Tuple[float, float, float, str]:\n\n # Get aircraft coordinates.\n x = random.uniform(-CONTROL_ZONE_RADIUS, CONTROL_ZONE_RADIUS)\n y = math.sqrt(CONTROL_ZONE_RADIUS ** 2 - x ** 2)\n y = y if random.randint(0, 1) else -y\n\n ang = _get_ac_heading(x, y)\n\n return x, y, ang, \"A\"", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0])*(p3[1] - p1[1]) - (p2[1] - p1[1])*(p3[0] - p1[0])", "def rgb_to_cmyk(r: int, g: int, b: int) -> tuple:\n w = max(r / 255, g / 255, b / 255)\n c = (w - (r / 255)) / w\n m = (w - (g / 255)) / w\n y = (w - (b / 255)) / w\n k = 1 - w\n return(c, m, y, k)", "def get_image2D_cutout(\n shot,\n coords,\n wave_obj,\n width=40,\n height=40,\n imtype=\"clean_image\",\n survey=LATEST_HDR_NAME,\n):\n fibers = Fibers(shot)\n\n idx = fibers.get_closest_fiber(coords)\n multiframe_obj = fibers.table.cols.multiframe[idx].astype(str)\n expnum_obj = fibers.table.cols.expnum[idx]\n x, y = fibers.get_image_xy(idx, wave_obj)\n\n im0 = fibers.hdfile.root.Data.Images.read_where(\n \"(multiframe == multiframe_obj) & (expnum == expnum_obj)\"\n )\n\n return im0[imtype][0][\n x - int(np.floor(height / 2)) : x + int(np.ceil(height / 2)),\n y - int(np.floor(width / 2)) : y + 
int(np.ceil(width / 2)),\n ]", "def get_image2D_cutout(\n shot,\n coords,\n wave_obj,\n width=40,\n height=40,\n imtype=\"clean_image\",\n survey=LATEST_HDR_NAME,\n):\n fibers = Fibers(shot)\n\n idx = fibers.get_closest_fiber(coords)\n multiframe_obj = fibers.table.cols.multiframe[idx].astype(str)\n expnum_obj = fibers.table.cols.expnum[idx]\n x, y = fibers.get_image_xy(idx, wave_obj)\n\n im0 = fibers.hdfile.root.Data.Images.read_where(\n \"(multiframe == multiframe_obj) & (expnum == expnum_obj)\"\n )\n\n return im0[imtype][0][\n x - int(np.floor(height / 2)) : x + int(np.ceil(height / 2)),\n y - int(np.floor(width / 2)) : y + int(np.ceil(width / 2)),\n ]", "def get_piles_place(self):\n idx_draw = []\n piles_draw = []\n i = 0\n for p in self.game_state['piles']:\n if not self.game_state['two_player']:\n if not self.game_state['piles_taken'][i] and len(p) < 3:\n idx_draw.append(i)\n piles_draw.append(p)\n else:\n if len(p) <= i and not self.game_state['piles_taken'][i]:\n idx_draw.append(i)\n piles_draw.append(p)\n i += 1\n return (piles_draw,idx_draw)", "def get_temps(self):\n try:\n cmos = self.cmos_temp\n except Exception:\n cmos = None\n try:\n pcb = self.pcb_temp\n except Exception:\n pcb = None\n return cmos, pcb", "def _whctrs(anchor):\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr", "def _whctrs(anchor):\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr", "def getTEMP(self):\r\n\t\ttemp_H = self.read(0x41)\r\n\t\ttemp_L = self.read(0x42)\r\n\t\ttemp = self.twos_comp(val = (temp_H*256 + temp_L),bits=16)\r\n\t\ttempC = (temp/340.0)+36.53\r\n\t\ttempF = tempC*(9.0/5) + 32\r\n\t\treturn tempC,tempF ,temp" ]
[ "0.5148957", "0.49647325", "0.48577526", "0.48168695", "0.47914028", "0.4786531", "0.4775281", "0.47224483", "0.47096628", "0.46394467", "0.46250522", "0.46067777", "0.4606352", "0.45833904", "0.4575049", "0.45720398", "0.4562999", "0.4545978", "0.45373318", "0.4482143", "0.44799474", "0.44576597", "0.445183", "0.44432643", "0.44424865", "0.4434412", "0.44158897", "0.44045046", "0.44032982", "0.4402378", "0.43933347", "0.4386865", "0.43744254", "0.4372216", "0.43666583", "0.4364636", "0.4360968", "0.43520603", "0.4346387", "0.4340016", "0.43395483", "0.4331933", "0.43306458", "0.4326735", "0.431167", "0.43115902", "0.43095678", "0.43016827", "0.42948025", "0.42884254", "0.42874897", "0.42812395", "0.42804328", "0.4274249", "0.42716", "0.4271106", "0.42691416", "0.42666036", "0.42643556", "0.42635405", "0.42629784", "0.42571437", "0.42556083", "0.4253472", "0.4251816", "0.425125", "0.425125", "0.4251188", "0.4250445", "0.4247169", "0.42428625", "0.42421815", "0.42417932", "0.42386016", "0.42385465", "0.42344147", "0.42344147", "0.42287922", "0.42259392", "0.4223833", "0.42238227", "0.4223563", "0.42231494", "0.4221628", "0.4212142", "0.42082715", "0.4205384", "0.42048642", "0.42009634", "0.4199067", "0.4194368", "0.41929832", "0.41923615", "0.4183834", "0.41831285", "0.41831285", "0.4181924", "0.4179812", "0.4179424", "0.4179424", "0.41748604" ]
0.0
-1
String representation of this class
def __repr__(self): return f"<Airport(id={str(self.id)}, name={self.name})>"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toString(self):\n\n sMembers = '';\n for sAttr in self.getDataAttributes():\n oValue = getattr(self, sAttr);\n sMembers += ', %s=%s' % (sAttr, oValue);\n\n oClass = type(self);\n if sMembers == '':\n return '<%s>' % (oClass.__name__);\n return '<%s: %s>' % (oClass.__name__, sMembers[2:]);", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n\n return str(self)", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def serialize(self):\n\n\t\treturn str(self)", "def __repr__(self) -> str:\n return str(self)", "def __repr__(self) -> str:\n return str(self.as_dict())", "def __repr__(self):\r\n return str(self)", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __str__(self):\n return repr(self)", "def __repr__(self):\n \n return \"%s(%s)\" % (self.__class__.__name__, self.__str__())", "def __repr__(self):\n\t\treturn str(self)", "def __repr__(self):\n\t\treturn str(self)", "def __repr__(self):\r\n\t\treturn str(self)", "def __str__(self):\n return \"<%s: %s>\" % (self.__class__, self.describe())", "def __str__(self):\n class_name_str = str(self.__class__.__name__) + \": (\"\n attributes_str = str(self.head_vertex) + \", \" + \\\n str(self.tail_vertex) + \")\"\n str_rep = class_name_str + attributes_str\n return str_rep", "def toString(self) -> str:\n raise NotImplementedError", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n 
return self.__str__()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.8405431", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.83985007", "0.8396452", "0.8396452", "0.8396452", "0.8396452", "0.8396452", "0.83616906", "0.8349292", "0.8349292", "0.8349292", "0.8338544", "0.8328839", "0.83177876", "0.83100426", "0.8301436", "0.8301436", "0.8301436", "0.8301436", "0.8289228", "0.8287022", "0.82869333", "0.82869333", "0.82848495", "0.8280606", "0.8279159", "0.82721627", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.826237", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646", "0.8261646" ]
0.0
-1